submit             26 arch/powerpc/include/asm/async_tx.h __async_tx_find_channel(struct async_submit_ctl *submit,
submit             34 crypto/async_tx/async_memcpy.c 	     struct async_submit_ctl *submit)
submit             36 crypto/async_tx/async_memcpy.c 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
submit             48 crypto/async_tx/async_memcpy.c 		if (submit->cb_fn)
submit             50 crypto/async_tx/async_memcpy.c 		if (submit->flags & ASYNC_TX_FENCE)
submit             70 crypto/async_tx/async_memcpy.c 		async_tx_submit(chan, tx, submit);
submit             76 crypto/async_tx/async_memcpy.c 		async_tx_quiesce(&submit->depend_tx);
submit             86 crypto/async_tx/async_memcpy.c 		async_tx_sync_epilog(submit);
submit             39 crypto/async_tx/async_pq.c 		      struct async_submit_ctl *submit)
submit             43 crypto/async_tx/async_pq.c 	enum async_tx_flags flags_orig = submit->flags;
submit             44 crypto/async_tx/async_pq.c 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
submit             45 crypto/async_tx/async_pq.c 	dma_async_tx_callback cb_param_orig = submit->cb_param;
submit             52 crypto/async_tx/async_pq.c 		submit->flags = flags_orig;
submit             59 crypto/async_tx/async_pq.c 			submit->flags &= ~ASYNC_TX_ACK;
submit             60 crypto/async_tx/async_pq.c 			submit->flags |= ASYNC_TX_FENCE;
submit             61 crypto/async_tx/async_pq.c 			submit->cb_fn = NULL;
submit             62 crypto/async_tx/async_pq.c 			submit->cb_param = NULL;
submit             64 crypto/async_tx/async_pq.c 			submit->cb_fn = cb_fn_orig;
submit             65 crypto/async_tx/async_pq.c 			submit->cb_param = cb_param_orig;
submit             69 crypto/async_tx/async_pq.c 		if (submit->flags & ASYNC_TX_FENCE)
submit             85 crypto/async_tx/async_pq.c 			async_tx_quiesce(&submit->depend_tx);
submit             90 crypto/async_tx/async_pq.c 		async_tx_submit(chan, tx, submit);
submit             91 crypto/async_tx/async_pq.c 		submit->depend_tx = tx;
submit            108 crypto/async_tx/async_pq.c 		     size_t len, struct async_submit_ctl *submit)
submit            114 crypto/async_tx/async_pq.c 	if (submit->scribble)
submit            115 crypto/async_tx/async_pq.c 		srcs = submit->scribble;
submit            132 crypto/async_tx/async_pq.c 	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
submit            138 crypto/async_tx/async_pq.c 	async_tx_sync_epilog(submit);
submit            164 crypto/async_tx/async_pq.c 		   size_t len, struct async_submit_ctl *submit)
submit            167 crypto/async_tx/async_pq.c 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
submit            179 crypto/async_tx/async_pq.c 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
submit            228 crypto/async_tx/async_pq.c 		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
submit            239 crypto/async_tx/async_pq.c 	async_tx_quiesce(&submit->depend_tx);
submit            249 crypto/async_tx/async_pq.c 	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
submit            256 crypto/async_tx/async_pq.c pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
submit            261 crypto/async_tx/async_pq.c 	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0,  blocks,
submit            283 crypto/async_tx/async_pq.c 		   struct async_submit_ctl *submit)
submit            285 crypto/async_tx/async_pq.c 	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
submit            289 crypto/async_tx/async_pq.c 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
submit            339 crypto/async_tx/async_pq.c 		if (submit->flags & ASYNC_TX_FENCE)
submit            350 crypto/async_tx/async_pq.c 			async_tx_quiesce(&submit->depend_tx);
submit            355 crypto/async_tx/async_pq.c 		async_tx_submit(chan, tx, submit);
submit            359 crypto/async_tx/async_pq.c 		enum async_tx_flags flags_orig = submit->flags;
submit            360 crypto/async_tx/async_pq.c 		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
submit            361 crypto/async_tx/async_pq.c 		void *scribble = submit->scribble;
submit            362 crypto/async_tx/async_pq.c 		void *cb_param_orig = submit->cb_param;
submit            374 crypto/async_tx/async_pq.c 		async_tx_quiesce(&submit->depend_tx);
submit            382 crypto/async_tx/async_pq.c 			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
submit            384 crypto/async_tx/async_pq.c 			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
submit            394 crypto/async_tx/async_pq.c 			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
submit            395 crypto/async_tx/async_pq.c 			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
submit            406 crypto/async_tx/async_pq.c 		submit->cb_fn = cb_fn_orig;
submit            407 crypto/async_tx/async_pq.c 		submit->cb_param = cb_param_orig;
submit            408 crypto/async_tx/async_pq.c 		submit->flags = flags_orig;
submit            409 crypto/async_tx/async_pq.c 		async_tx_sync_epilog(submit);
submit             19 crypto/async_tx/async_raid6_recov.c 		  size_t len, struct async_submit_ctl *submit)
submit             21 crypto/async_tx/async_raid6_recov.c 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
submit             38 crypto/async_tx/async_raid6_recov.c 		if (submit->flags & ASYNC_TX_FENCE)
submit             54 crypto/async_tx/async_raid6_recov.c 			async_tx_submit(chan, tx, submit);
submit             66 crypto/async_tx/async_raid6_recov.c 	async_tx_quiesce(&submit->depend_tx);
submit             84 crypto/async_tx/async_raid6_recov.c 	   struct async_submit_ctl *submit)
submit             86 crypto/async_tx/async_raid6_recov.c 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
submit            102 crypto/async_tx/async_raid6_recov.c 		if (submit->flags & ASYNC_TX_FENCE)
submit            121 crypto/async_tx/async_raid6_recov.c 			async_tx_submit(chan, tx, submit);
submit            134 crypto/async_tx/async_raid6_recov.c 	async_tx_quiesce(&submit->depend_tx);
submit            147 crypto/async_tx/async_raid6_recov.c 		struct page **blocks, struct async_submit_ctl *submit)
submit            153 crypto/async_tx/async_raid6_recov.c 	enum async_tx_flags flags = submit->flags;
submit            154 crypto/async_tx/async_raid6_recov.c 	dma_async_tx_callback cb_fn = submit->cb_fn;
submit            155 crypto/async_tx/async_raid6_recov.c 	void *cb_param = submit->cb_param;
submit            156 crypto/async_tx/async_raid6_recov.c 	void *scribble = submit->scribble;
submit            170 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            171 crypto/async_tx/async_raid6_recov.c 	tx = async_sum_product(b, srcs, coef, bytes, submit);
submit            176 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
submit            178 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(a, srcs, 0, 2, bytes, submit);
submit            186 crypto/async_tx/async_raid6_recov.c 		struct page **blocks, struct async_submit_ctl *submit)
submit            192 crypto/async_tx/async_raid6_recov.c 	enum async_tx_flags flags = submit->flags;
submit            193 crypto/async_tx/async_raid6_recov.c 	dma_async_tx_callback cb_fn = submit->cb_fn;
submit            194 crypto/async_tx/async_raid6_recov.c 	void *cb_param = submit->cb_param;
submit            195 crypto/async_tx/async_raid6_recov.c 	void *scribble = submit->scribble;
submit            221 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            222 crypto/async_tx/async_raid6_recov.c 	tx = async_memcpy(dp, g, 0, 0, bytes, submit);
submit            223 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            224 crypto/async_tx/async_raid6_recov.c 	tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
submit            229 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
submit            231 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
submit            236 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
submit            238 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
submit            245 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            246 crypto/async_tx/async_raid6_recov.c 	tx = async_sum_product(dq, srcs, coef, bytes, submit);
submit            251 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
submit            253 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
submit            260 crypto/async_tx/async_raid6_recov.c 	      struct page **blocks, struct async_submit_ctl *submit)
submit            266 crypto/async_tx/async_raid6_recov.c 	enum async_tx_flags flags = submit->flags;
submit            267 crypto/async_tx/async_raid6_recov.c 	dma_async_tx_callback cb_fn = submit->cb_fn;
submit            268 crypto/async_tx/async_raid6_recov.c 	void *cb_param = submit->cb_param;
submit            269 crypto/async_tx/async_raid6_recov.c 	void *scribble = submit->scribble;
submit            285 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            286 crypto/async_tx/async_raid6_recov.c 	tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
submit            297 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
submit            299 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
submit            304 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
submit            306 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
submit            313 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            314 crypto/async_tx/async_raid6_recov.c 	tx = async_sum_product(dq, srcs, coef, bytes, submit);
submit            319 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
submit            321 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
submit            337 crypto/async_tx/async_raid6_recov.c 			struct page **blocks, struct async_submit_ctl *submit)
submit            339 crypto/async_tx/async_raid6_recov.c 	void *scribble = submit->scribble;
submit            356 crypto/async_tx/async_raid6_recov.c 		async_tx_quiesce(&submit->depend_tx);
submit            365 crypto/async_tx/async_raid6_recov.c 		async_tx_sync_epilog(submit);
submit            386 crypto/async_tx/async_raid6_recov.c 		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
submit            393 crypto/async_tx/async_raid6_recov.c 		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
submit            395 crypto/async_tx/async_raid6_recov.c 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
submit            410 crypto/async_tx/async_raid6_recov.c 			struct page **blocks, struct async_submit_ctl *submit)
submit            415 crypto/async_tx/async_raid6_recov.c 	enum async_tx_flags flags = submit->flags;
submit            416 crypto/async_tx/async_raid6_recov.c 	dma_async_tx_callback cb_fn = submit->cb_fn;
submit            417 crypto/async_tx/async_raid6_recov.c 	void *cb_param = submit->cb_param;
submit            418 crypto/async_tx/async_raid6_recov.c 	void *scribble = submit->scribble;
submit            432 crypto/async_tx/async_raid6_recov.c 		async_tx_quiesce(&submit->depend_tx);
submit            441 crypto/async_tx/async_raid6_recov.c 		async_tx_sync_epilog(submit);
submit            476 crypto/async_tx/async_raid6_recov.c 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
submit            478 crypto/async_tx/async_raid6_recov.c 		tx = async_memcpy(p, g, 0, 0, bytes, submit);
submit            480 crypto/async_tx/async_raid6_recov.c 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
submit            482 crypto/async_tx/async_raid6_recov.c 		tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
submit            484 crypto/async_tx/async_raid6_recov.c 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
submit            486 crypto/async_tx/async_raid6_recov.c 		tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
submit            498 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
submit            500 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
submit            502 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
submit            503 crypto/async_tx/async_raid6_recov.c 	tx = async_mult(dq, dq, coef, bytes, submit);
submit            507 crypto/async_tx/async_raid6_recov.c 	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
submit            509 crypto/async_tx/async_raid6_recov.c 	tx = async_xor(p, srcs, 0, 2, bytes, submit);
submit             43 crypto/async_tx/async_tx.c __async_tx_find_channel(struct async_submit_ctl *submit,
submit             46 crypto/async_tx/async_tx.c 	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
submit            144 crypto/async_tx/async_tx.c 		struct async_submit_ctl *submit)
submit            146 crypto/async_tx/async_tx.c 	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
submit            148 crypto/async_tx/async_tx.c 	tx->callback = submit->cb_fn;
submit            149 crypto/async_tx/async_tx.c 	tx->callback_param = submit->cb_param;
submit            204 crypto/async_tx/async_tx.c 	if (submit->flags & ASYNC_TX_ACK)
submit            221 crypto/async_tx/async_tx.c async_trigger_callback(struct async_submit_ctl *submit)
submit            226 crypto/async_tx/async_tx.c 	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
submit            245 crypto/async_tx/async_tx.c 		async_tx_submit(chan, tx, submit);
submit            250 crypto/async_tx/async_tx.c 		async_tx_quiesce(&submit->depend_tx);
submit            252 crypto/async_tx/async_tx.c 		async_tx_sync_epilog(submit);
submit             24 crypto/async_tx/async_xor.c 	     struct async_submit_ctl *submit)
submit             28 crypto/async_tx/async_xor.c 	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
submit             29 crypto/async_tx/async_xor.c 	void *cb_param_orig = submit->cb_param;
submit             30 crypto/async_tx/async_xor.c 	enum async_tx_flags flags_orig = submit->flags;
submit             40 crypto/async_tx/async_xor.c 		submit->flags = flags_orig;
submit             46 crypto/async_tx/async_xor.c 			submit->flags &= ~ASYNC_TX_ACK;
submit             47 crypto/async_tx/async_xor.c 			submit->flags |= ASYNC_TX_FENCE;
submit             48 crypto/async_tx/async_xor.c 			submit->cb_fn = NULL;
submit             49 crypto/async_tx/async_xor.c 			submit->cb_param = NULL;
submit             51 crypto/async_tx/async_xor.c 			submit->cb_fn = cb_fn_orig;
submit             52 crypto/async_tx/async_xor.c 			submit->cb_param = cb_param_orig;
submit             54 crypto/async_tx/async_xor.c 		if (submit->cb_fn)
submit             56 crypto/async_tx/async_xor.c 		if (submit->flags & ASYNC_TX_FENCE)
submit             70 crypto/async_tx/async_xor.c 			async_tx_quiesce(&submit->depend_tx);
submit             83 crypto/async_tx/async_xor.c 		async_tx_submit(chan, tx, submit);
submit             84 crypto/async_tx/async_xor.c 		submit->depend_tx = tx;
submit            101 crypto/async_tx/async_xor.c 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
submit            109 crypto/async_tx/async_xor.c 	if (submit->scribble)
submit            110 crypto/async_tx/async_xor.c 		srcs = submit->scribble;
submit            122 crypto/async_tx/async_xor.c 	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
submit            135 crypto/async_tx/async_xor.c 	async_tx_sync_epilog(submit);
submit            161 crypto/async_tx/async_xor.c 	  int src_cnt, size_t len, struct async_submit_ctl *submit)
submit            163 crypto/async_tx/async_xor.c 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
submit            195 crypto/async_tx/async_xor.c 		tx = do_async_xor(chan, unmap, submit);
submit            208 crypto/async_tx/async_xor.c 		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
submit            214 crypto/async_tx/async_xor.c 		async_tx_quiesce(&submit->depend_tx);
submit            216 crypto/async_tx/async_xor.c 		do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
submit            229 crypto/async_tx/async_xor.c xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
submit            235 crypto/async_tx/async_xor.c 	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
submit            258 crypto/async_tx/async_xor.c 	      struct async_submit_ctl *submit)
submit            260 crypto/async_tx/async_xor.c 	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
submit            277 crypto/async_tx/async_xor.c 		if (submit->cb_fn)
submit            279 crypto/async_tx/async_xor.c 		if (submit->flags & ASYNC_TX_FENCE)
submit            293 crypto/async_tx/async_xor.c 			async_tx_quiesce(&submit->depend_tx);
submit            303 crypto/async_tx/async_xor.c 		async_tx_submit(chan, tx, submit);
submit            305 crypto/async_tx/async_xor.c 		enum async_tx_flags flags_orig = submit->flags;
submit            312 crypto/async_tx/async_xor.c 		submit->flags |= ASYNC_TX_XOR_DROP_DST;
submit            313 crypto/async_tx/async_xor.c 		submit->flags &= ~ASYNC_TX_ACK;
submit            315 crypto/async_tx/async_xor.c 		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
submit            321 crypto/async_tx/async_xor.c 		async_tx_sync_epilog(submit);
submit            322 crypto/async_tx/async_xor.c 		submit->flags = flags_orig;
submit             57 crypto/async_tx/raid6test.c 	struct async_submit_ctl submit;
submit             68 crypto/async_tx/raid6test.c 			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
submit             69 crypto/async_tx/raid6test.c 			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
submit             87 crypto/async_tx/raid6test.c 			init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
submit             89 crypto/async_tx/raid6test.c 			tx = async_xor(dest, blocks, 0, count, bytes, &submit);
submit             91 crypto/async_tx/raid6test.c 			init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
submit             92 crypto/async_tx/raid6test.c 			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
submit             97 crypto/async_tx/raid6test.c 			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
submit             98 crypto/async_tx/raid6test.c 			tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
submit            101 crypto/async_tx/raid6test.c 			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
submit            102 crypto/async_tx/raid6test.c 			tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
submit            106 crypto/async_tx/raid6test.c 	init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
submit            107 crypto/async_tx/raid6test.c 	tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
submit            147 crypto/async_tx/raid6test.c 	struct async_submit_ctl submit;
submit            164 crypto/async_tx/raid6test.c 	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
submit            165 crypto/async_tx/raid6test.c 	tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
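
Note: the crypto/async_tx hits above all revolve around one pattern: a stack-allocated struct async_submit_ctl carries the flags, dependency descriptor, completion callback, and scribble buffer for each operation, and init_async_submit() repacks the same ctl between chained calls. A minimal sketch of that chaining, modeled on the raid6test.c lines above (the ptrs/disks/bytes/addr_conv setup is assumed to exist in the caller; this is illustrative, not a drop-in kernel function):

    /* Hedged sketch: chain two async_tx operations through one submit ctl. */
    static void chain_example(struct page **ptrs, int disks, size_t bytes,
                              addr_conv_t *addr_conv)
    {
    	struct async_submit_ctl submit;
    	struct dma_async_tx_descriptor *tx;

    	/* first op: no dependency, no callback */
    	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
    	tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);

    	/* second op: depends on tx; ASYNC_TX_ACK lets the channel reclaim it */
    	init_async_submit(&submit, ASYNC_TX_ACK, tx, NULL, NULL, addr_conv);
    	tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);

    	/* block until the whole chain completes */
    	async_tx_quiesce(&tx);
    }

When no DMA channel offers the needed capability, the same entry points fall back to the synchronous paths visible in the listing (do_sync_gen_syndrome(), do_sync_xor(), async_tx_sync_epilog()).
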
submit            971 drivers/block/drbd/drbd_int.h 	struct submit_worker submit;
submit           2765 drivers/block/drbd/drbd_main.c 	device->submit.wq =
submit           2767 drivers/block/drbd/drbd_main.c 	if (!device->submit.wq)
submit           2770 drivers/block/drbd/drbd_main.c 	INIT_WORK(&device->submit.worker, do_submit);
submit           2771 drivers/block/drbd/drbd_main.c 	INIT_LIST_HEAD(&device->submit.writes);
submit           2911 drivers/block/drbd/drbd_receiver.c 		goto submit;
submit           3027 drivers/block/drbd/drbd_receiver.c submit:
submit           1194 drivers/block/drbd/drbd_req.c 	list_add_tail(&req->tl_requests, &device->submit.writes);
submit           1198 drivers/block/drbd/drbd_req.c 	queue_work(device->submit.wq, &device->submit.worker);
submit           1517 drivers/block/drbd/drbd_req.c 	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
submit           1524 drivers/block/drbd/drbd_req.c 	list_splice_tail_init(&device->submit.writes, &incoming);
submit           1564 drivers/block/drbd/drbd_req.c 			list_splice_tail_init(&device->submit.writes, &incoming);
submit           1592 drivers/block/drbd/drbd_req.c 			if (list_empty(&device->submit.writes))
submit           1596 drivers/block/drbd/drbd_req.c 			list_splice_tail_init(&device->submit.writes, &more_incoming);
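
Note: the drbd hits show a second, unrelated meaning of "submit": a per-device worker (struct submit_worker submit) that batches incoming writes. Producers append requests to submit.writes and kick submit.wq; do_submit() splices the list empty under the lock and then processes the batch lock-free. A hedged, self-contained sketch of that shape (the my_dev/enqueue_write/do_submit_sketch names are illustrative, not drbd's; drbd takes its own req_lock):

    struct my_dev {
    	spinlock_t lock;
    	struct {
    		struct workqueue_struct *wq;
    		struct work_struct worker;
    		struct list_head writes;
    	} submit;
    };

    static void enqueue_write(struct my_dev *dev, struct list_head *tl_requests)
    {
    	spin_lock_irq(&dev->lock);
    	list_add_tail(tl_requests, &dev->submit.writes);
    	spin_unlock_irq(&dev->lock);
    	queue_work(dev->submit.wq, &dev->submit.worker);
    }

    static void do_submit_sketch(struct work_struct *ws)
    {
    	struct my_dev *dev = container_of(ws, struct my_dev, submit.worker);
    	LIST_HEAD(incoming);

    	spin_lock_irq(&dev->lock);
    	list_splice_tail_init(&dev->submit.writes, &incoming);
    	spin_unlock_irq(&dev->lock);

    	/* process "incoming" here without holding the lock */
    }
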
submit            119 drivers/dma/ti/cppi41.c 	u16 submit;
submit            157 drivers/dma/ti/cppi41.c 	[ 0] = { .submit = 32, .complete =  93},
submit            158 drivers/dma/ti/cppi41.c 	[ 1] = { .submit = 34, .complete =  94},
submit            159 drivers/dma/ti/cppi41.c 	[ 2] = { .submit = 36, .complete =  95},
submit            160 drivers/dma/ti/cppi41.c 	[ 3] = { .submit = 38, .complete =  96},
submit            161 drivers/dma/ti/cppi41.c 	[ 4] = { .submit = 40, .complete =  97},
submit            162 drivers/dma/ti/cppi41.c 	[ 5] = { .submit = 42, .complete =  98},
submit            163 drivers/dma/ti/cppi41.c 	[ 6] = { .submit = 44, .complete =  99},
submit            164 drivers/dma/ti/cppi41.c 	[ 7] = { .submit = 46, .complete = 100},
submit            165 drivers/dma/ti/cppi41.c 	[ 8] = { .submit = 48, .complete = 101},
submit            166 drivers/dma/ti/cppi41.c 	[ 9] = { .submit = 50, .complete = 102},
submit            167 drivers/dma/ti/cppi41.c 	[10] = { .submit = 52, .complete = 103},
submit            168 drivers/dma/ti/cppi41.c 	[11] = { .submit = 54, .complete = 104},
submit            169 drivers/dma/ti/cppi41.c 	[12] = { .submit = 56, .complete = 105},
submit            170 drivers/dma/ti/cppi41.c 	[13] = { .submit = 58, .complete = 106},
submit            171 drivers/dma/ti/cppi41.c 	[14] = { .submit = 60, .complete = 107},
submit            174 drivers/dma/ti/cppi41.c 	[15] = { .submit = 62, .complete = 125},
submit            175 drivers/dma/ti/cppi41.c 	[16] = { .submit = 64, .complete = 126},
submit            176 drivers/dma/ti/cppi41.c 	[17] = { .submit = 66, .complete = 127},
submit            177 drivers/dma/ti/cppi41.c 	[18] = { .submit = 68, .complete = 128},
submit            178 drivers/dma/ti/cppi41.c 	[19] = { .submit = 70, .complete = 129},
submit            179 drivers/dma/ti/cppi41.c 	[20] = { .submit = 72, .complete = 130},
submit            180 drivers/dma/ti/cppi41.c 	[21] = { .submit = 74, .complete = 131},
submit            181 drivers/dma/ti/cppi41.c 	[22] = { .submit = 76, .complete = 132},
submit            182 drivers/dma/ti/cppi41.c 	[23] = { .submit = 78, .complete = 133},
submit            183 drivers/dma/ti/cppi41.c 	[24] = { .submit = 80, .complete = 134},
submit            184 drivers/dma/ti/cppi41.c 	[25] = { .submit = 82, .complete = 135},
submit            185 drivers/dma/ti/cppi41.c 	[26] = { .submit = 84, .complete = 136},
submit            186 drivers/dma/ti/cppi41.c 	[27] = { .submit = 86, .complete = 137},
submit            187 drivers/dma/ti/cppi41.c 	[28] = { .submit = 88, .complete = 138},
submit            188 drivers/dma/ti/cppi41.c 	[29] = { .submit = 90, .complete = 139},
submit            193 drivers/dma/ti/cppi41.c 	[ 0] = { .submit =  1, .complete = 109},
submit            194 drivers/dma/ti/cppi41.c 	[ 1] = { .submit =  2, .complete = 110},
submit            195 drivers/dma/ti/cppi41.c 	[ 2] = { .submit =  3, .complete = 111},
submit            196 drivers/dma/ti/cppi41.c 	[ 3] = { .submit =  4, .complete = 112},
submit            197 drivers/dma/ti/cppi41.c 	[ 4] = { .submit =  5, .complete = 113},
submit            198 drivers/dma/ti/cppi41.c 	[ 5] = { .submit =  6, .complete = 114},
submit            199 drivers/dma/ti/cppi41.c 	[ 6] = { .submit =  7, .complete = 115},
submit            200 drivers/dma/ti/cppi41.c 	[ 7] = { .submit =  8, .complete = 116},
submit            201 drivers/dma/ti/cppi41.c 	[ 8] = { .submit =  9, .complete = 117},
submit            202 drivers/dma/ti/cppi41.c 	[ 9] = { .submit = 10, .complete = 118},
submit            203 drivers/dma/ti/cppi41.c 	[10] = { .submit = 11, .complete = 119},
submit            204 drivers/dma/ti/cppi41.c 	[11] = { .submit = 12, .complete = 120},
submit            205 drivers/dma/ti/cppi41.c 	[12] = { .submit = 13, .complete = 121},
submit            206 drivers/dma/ti/cppi41.c 	[13] = { .submit = 14, .complete = 122},
submit            207 drivers/dma/ti/cppi41.c 	[14] = { .submit = 15, .complete = 123},
submit            210 drivers/dma/ti/cppi41.c 	[15] = { .submit = 16, .complete = 141},
submit            211 drivers/dma/ti/cppi41.c 	[16] = { .submit = 17, .complete = 142},
submit            212 drivers/dma/ti/cppi41.c 	[17] = { .submit = 18, .complete = 143},
submit            213 drivers/dma/ti/cppi41.c 	[18] = { .submit = 19, .complete = 144},
submit            214 drivers/dma/ti/cppi41.c 	[19] = { .submit = 20, .complete = 145},
submit            215 drivers/dma/ti/cppi41.c 	[20] = { .submit = 21, .complete = 146},
submit            216 drivers/dma/ti/cppi41.c 	[21] = { .submit = 22, .complete = 147},
submit            217 drivers/dma/ti/cppi41.c 	[22] = { .submit = 23, .complete = 148},
submit            218 drivers/dma/ti/cppi41.c 	[23] = { .submit = 24, .complete = 149},
submit            219 drivers/dma/ti/cppi41.c 	[24] = { .submit = 25, .complete = 150},
submit            220 drivers/dma/ti/cppi41.c 	[25] = { .submit = 26, .complete = 151},
submit            221 drivers/dma/ti/cppi41.c 	[26] = { .submit = 27, .complete = 152},
submit            222 drivers/dma/ti/cppi41.c 	[27] = { .submit = 28, .complete = 153},
submit            223 drivers/dma/ti/cppi41.c 	[28] = { .submit = 29, .complete = 154},
submit            224 drivers/dma/ti/cppi41.c 	[29] = { .submit = 30, .complete = 155},
submit            228 drivers/dma/ti/cppi41.c 	[0] = { .submit =  16, .complete = 24},
submit            229 drivers/dma/ti/cppi41.c 	[1] = { .submit =  18, .complete = 24},
submit            230 drivers/dma/ti/cppi41.c 	[2] = { .submit =  20, .complete = 24},
submit            231 drivers/dma/ti/cppi41.c 	[3] = { .submit =  22, .complete = 24},
submit            235 drivers/dma/ti/cppi41.c 	[0] = { .submit =  1, .complete = 26},
submit            236 drivers/dma/ti/cppi41.c 	[1] = { .submit =  3, .complete = 26},
submit            237 drivers/dma/ti/cppi41.c 	[2] = { .submit =  5, .complete = 26},
submit            238 drivers/dma/ti/cppi41.c 	[3] = { .submit =  7, .complete = 26},
submit            663 drivers/dma/ti/cppi41.c 				QMGR_QUEUE_D(cdd->td_queue.submit));
submit            927 drivers/dma/ti/cppi41.c 	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
submit            974 drivers/dma/ti/cppi41.c 	cchan->q_num = queues[cchan->port_num].submit;
submit           1002 drivers/dma/ti/cppi41.c 	.td_queue = { .submit = 31, .complete = 0 },
submit           1010 drivers/dma/ti/cppi41.c 	.td_queue = { .submit = 31, .complete = 0 },
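
Note: in cppi41.c, .submit is data rather than code: each channel owns a fixed pair of hardware queues, one the driver pushes descriptors onto and one the hardware completes them to, selected per SoC by table (am335x vs. da8xx variants above) and per channel by index, as in "cchan->q_num = queues[cchan->port_num].submit;". The teardown queue (td_queue) reuses the same struct. A condensed illustration with made-up values:

    struct chan_queues {
    	u16 submit;	/* queue descriptors are pushed onto */
    	u16 complete;	/* queue the hardware returns them on */
    };

    static const struct chan_queues demo_queues[] = {
    	[0] = { .submit = 32, .complete = 93 },
    	[1] = { .submit = 34, .complete = 94 },
    	/* ... one entry per DMA channel ... */
    };

    /* channel setup picks its submit queue by port number */
    static u16 submit_queue(unsigned int port_num)
    {
    	return demo_queues[port_num].submit;
    }
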
submit            113 drivers/gpu/drm/etnaviv/etnaviv_dump.c void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
submit            115 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	struct etnaviv_gpu *gpu = submit->gpu;
submit            137 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;
submit            140 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            141 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		obj = submit->bos[i].obj;
submit            179 drivers/gpu/drm/etnaviv/etnaviv_dump.c 			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
submit            180 drivers/gpu/drm/etnaviv/etnaviv_dump.c 			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
submit            196 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            201 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		obj = submit->bos[i].obj;
submit            202 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		vram = submit->bos[i].mapping;
submit             39 drivers/gpu/drm/etnaviv/etnaviv_dump.h void etnaviv_core_dump(struct etnaviv_gem_submit *submit);
submit            112 drivers/gpu/drm/etnaviv/etnaviv_gem.h void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
submit             34 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	struct etnaviv_gem_submit *submit;
submit             35 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
submit             37 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit = kzalloc(sz, GFP_KERNEL);
submit             38 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (!submit)
submit             41 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
submit             43 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (!submit->pmrs) {
submit             44 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		kfree(submit);
submit             47 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->nr_pmrs = nr_pmrs;
submit             49 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->gpu = gpu;
submit             50 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	kref_init(&submit->refcount);
submit             52 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	return submit;
submit             55 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
submit             74 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->bos[i].flags = bo->flags;
submit             75 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
submit             81 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 			submit->bos[i].va = bo->presumed;
submit            101 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->bos[i].obj = to_etnaviv_bo(obj);
submit            105 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->nr_bos = i;
submit            111 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
submit            113 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->bos[i].flags & BO_LOCKED) {
submit            114 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
submit            117 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->bos[i].flags &= ~BO_LOCKED;
submit            121 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_lock_objects(struct etnaviv_gem_submit *submit,
submit            127 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            128 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
submit            135 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if (!(submit->bos[i].flags & BO_LOCKED)) {
submit            143 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 			submit->bos[i].flags |= BO_LOCKED;
submit            153 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit_unlock_object(submit, i);
submit            156 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit_unlock_object(submit, slow_locked);
submit            161 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		obj = &submit->bos[contended].obj->base;
submit            167 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 			submit->bos[contended].flags |= BO_LOCKED;
submit            176 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_fence_sync(struct etnaviv_gem_submit *submit)
submit            180 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            181 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
submit            190 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
submit            208 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
submit            212 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            213 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
submit            215 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
submit            217 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 							  submit->out_fence);
submit            220 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 							    submit->out_fence);
submit            222 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit_unlock_object(submit, i);
submit            226 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_pin_objects(struct etnaviv_gem_submit *submit)
submit            230 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            231 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit            235 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 						  submit->mmu_context,
submit            236 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 						  submit->bos[i].va);
submit            242 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
submit            243 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		     submit->bos[i].va != mapping->iova) {
submit            250 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->bos[i].flags |= BO_PINNED;
submit            251 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->bos[i].mapping = mapping;
submit            257 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
submit            260 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (idx >= submit->nr_bos) {
submit            262 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 				idx, submit->nr_bos);
submit            266 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	*bo = &submit->bos[idx];
submit            272 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
submit            281 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
submit            309 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		ret = submit_bo(submit, r->reloc_idx, &bo);
submit            326 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
submit            331 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	for (i = 0; i < submit->nr_pmrs; i++) {
submit            336 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		ret = submit_bo(submit, r->read_idx, &bo);
submit            361 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->pmrs[i].flags = r->flags;
submit            362 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->pmrs[i].domain = r->domain;
submit            363 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->pmrs[i].signal = r->signal;
submit            364 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->pmrs[i].sequence = r->sequence;
submit            365 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->pmrs[i].offset = r->read_offset;
submit            366 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
submit            374 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	struct etnaviv_gem_submit *submit =
submit            378 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->runtime_resumed)
submit            379 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		pm_runtime_put_autosuspend(submit->gpu->dev);
submit            381 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->cmdbuf.suballoc)
submit            382 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		etnaviv_cmdbuf_free(&submit->cmdbuf);
submit            384 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->mmu_context)
submit            385 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		etnaviv_iommu_context_put(submit->mmu_context);
submit            387 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->prev_mmu_context)
submit            388 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		etnaviv_iommu_context_put(submit->prev_mmu_context);
submit            390 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            391 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit            394 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if (submit->bos[i].flags & BO_PINNED) {
submit            395 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
submit            397 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 			submit->bos[i].mapping = NULL;
submit            398 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 			submit->bos[i].flags &= ~BO_PINNED;
submit            402 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit_unlock_object(submit, i);
submit            406 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	wake_up_all(&submit->gpu->fence_event);
submit            408 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->in_fence)
submit            409 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		dma_fence_put(submit->in_fence);
submit            410 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (submit->out_fence) {
submit            412 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		mutex_lock(&submit->gpu->fence_lock);
submit            413 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
submit            414 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		mutex_unlock(&submit->gpu->fence_lock);
submit            415 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		dma_fence_put(submit->out_fence);
submit            417 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	kfree(submit->pmrs);
submit            418 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	kfree(submit);
submit            421 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
submit            423 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	kref_put(&submit->refcount, submit_cleanup);
submit            435 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	struct etnaviv_gem_submit *submit;
submit            525 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
submit            526 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	if (!submit) {
submit            531 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
submit            536 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->ctx = file->driver_priv;
submit            537 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	etnaviv_iommu_context_get(submit->ctx->mmu);
submit            538 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->mmu_context = submit->ctx->mmu;
submit            539 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->exec_state = args->exec_state;
submit            540 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->flags = args->flags;
submit            542 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
submit            554 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		submit->in_fence = sync_file_get_fence(args->fence_fd);
submit            555 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		if (!submit->in_fence) {
submit            561 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = submit_pin_objects(submit);
submit            565 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = submit_reloc(submit, stream, args->stream_size / 4,
submit            570 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
submit            574 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
submit            576 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = submit_lock_objects(submit, &ticket);
submit            580 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = submit_fence_sync(submit);
submit            584 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
submit            588 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit_attach_object_fences(submit);
submit            597 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		sync_file = sync_file_create(submit->out_fence);
submit            606 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	args->fence = submit->out_fence_id;
submit            609 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	etnaviv_submit_put(submit);
submit           1206 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	const struct etnaviv_gem_submit *submit = event->submit;
submit           1209 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	for (i = 0; i < submit->nr_pmrs; i++) {
submit           1210 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
submit           1213 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
submit           1238 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	const struct etnaviv_gem_submit *submit = event->submit;
submit           1244 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	for (i = 0; i < submit->nr_pmrs; i++) {
submit           1245 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
submit           1263 drivers/gpu/drm/etnaviv/etnaviv_gpu.c struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
submit           1265 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	struct etnaviv_gpu *gpu = submit->gpu;
submit           1270 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	if (!submit->runtime_resumed) {
submit           1274 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		submit->runtime_resumed = true;
submit           1284 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	if (submit->nr_pmrs)
submit           1304 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		etnaviv_iommu_context_get(submit->mmu_context);
submit           1305 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		gpu->mmu_context = submit->mmu_context;
submit           1309 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		submit->prev_mmu_context = gpu->mmu_context;
submit           1312 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	if (submit->nr_pmrs) {
submit           1314 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		kref_get(&submit->refcount);
submit           1315 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		gpu->event[event[1]].submit = submit;
submit           1320 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
submit           1321 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
submit           1322 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 			     event[0], &submit->cmdbuf);
submit           1324 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	if (submit->nr_pmrs) {
submit           1326 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		kref_get(&submit->refcount);
submit           1327 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		gpu->event[event[2]].submit = submit;
submit           1345 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	etnaviv_submit_put(event->submit);
submit             83 drivers/gpu/drm/etnaviv/etnaviv_gpu.h 	struct etnaviv_gem_submit *submit;
submit            175 drivers/gpu/drm/etnaviv/etnaviv_gpu.h struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
submit             24 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
submit             28 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	if (unlikely(submit->in_fence)) {
submit             29 drivers/gpu/drm/etnaviv/etnaviv_sched.c 		fence = submit->in_fence;
submit             30 drivers/gpu/drm/etnaviv/etnaviv_sched.c 		submit->in_fence = NULL;
submit             38 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	for (i = 0; i < submit->nr_bos; i++) {
submit             39 drivers/gpu/drm/etnaviv/etnaviv_sched.c 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
submit             74 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
submit             78 drivers/gpu/drm/etnaviv/etnaviv_sched.c 		fence = etnaviv_gpu_submit(submit);
submit             80 drivers/gpu/drm/etnaviv/etnaviv_sched.c 		dev_dbg(submit->gpu->dev, "skipping bad job\n");
submit             87 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
submit             88 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	struct etnaviv_gpu *gpu = submit->gpu;
submit             96 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	if (dma_fence_is_signaled(submit->out_fence))
submit            118 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	etnaviv_core_dump(submit);
submit            129 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
submit            133 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	etnaviv_submit_put(submit);
submit            144 drivers/gpu/drm/etnaviv/etnaviv_sched.c 			   struct etnaviv_gem_submit *submit)
submit            153 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	mutex_lock(&submit->gpu->fence_lock);
submit            155 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
submit            156 drivers/gpu/drm/etnaviv/etnaviv_sched.c 				 submit->ctx);
submit            160 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
submit            161 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit            162 drivers/gpu/drm/etnaviv/etnaviv_sched.c 						submit->out_fence, 0,
submit            164 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	if (submit->out_fence_id < 0) {
submit            165 drivers/gpu/drm/etnaviv/etnaviv_sched.c 		drm_sched_job_cleanup(&submit->sched_job);
submit            171 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	kref_get(&submit->refcount);
submit            173 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	drm_sched_entity_push_job(&submit->sched_job, sched_entity);
submit            176 drivers/gpu/drm/etnaviv/etnaviv_sched.c 	mutex_unlock(&submit->gpu->fence_lock);
submit             22 drivers/gpu/drm/etnaviv/etnaviv_sched.h 			   struct etnaviv_gem_submit *submit);
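
Note: the etnaviv hits trace the lifecycle of a refcounted submit object: submit_create() does kref_init(), etnaviv_gpu_submit() takes extra references (kref_get()) for each perfmon event that will touch the submit after the ioctl returns, and every path releases through etnaviv_submit_put(), whose final kref_put() runs submit_cleanup(). The skeleton of that lifecycle, reduced to the refcounting (demo_* names are illustrative, and real cleanup also unpins BOs and drops fences):

    struct demo_submit {
    	struct kref refcount;
    	/* bos, cmdbuf, fences, mmu context ... */
    };

    static void demo_cleanup(struct kref *kref)
    {
    	struct demo_submit *submit =
    		container_of(kref, struct demo_submit, refcount);
    	kfree(submit);
    }

    static struct demo_submit *demo_create(void)
    {
    	struct demo_submit *submit = kzalloc(sizeof(*submit), GFP_KERNEL);

    	if (submit)
    		kref_init(&submit->refcount);	/* one ref for the caller */
    	return submit;
    }

    static void demo_put(struct demo_submit *submit)
    {
    	kref_put(&submit->refcount, demo_cleanup);
    }

Each consumer that outlives the ioctl (the two perfmon events in etnaviv_gpu_submit() above) takes kref_get() before queuing and drops its reference when the event fires.
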
submit           1045 drivers/gpu/drm/i915/gt/intel_lrc.c 	bool submit = false;
submit           1245 drivers/gpu/drm/i915/gt/intel_lrc.c 				submit = true;
submit           1257 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (!submit) {
submit           1329 drivers/gpu/drm/i915/gt/intel_lrc.c 				submit = true;
submit           1358 drivers/gpu/drm/i915/gt/intel_lrc.c 		  yesno(submit));
submit           1360 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (submit) {
submit            998 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sw_fence_init(&rq->submit, dummy_notify);
submit           1011 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sw_fence_commit(&dummy->submit);
submit           1017 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sw_fence_fini(&dummy->submit);
submit           2019 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
submit            544 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	bool submit = false;
submit            579 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			submit = true;
submit            589 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	if (submit) {
submit            108 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_fini(&rq->submit);
submit            135 drivers/gpu/drm/i915/i915_request.c 	cb->hook(container_of(cb->fence, struct i915_request, submit),
submit            231 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
submit            354 drivers/gpu/drm/i915/i915_request.c 	cb->fence = &rq->submit;
submit            533 drivers/gpu/drm/i915/i915_request.c 		container_of(fence, typeof(*request), submit);
submit            705 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
submit            808 drivers/gpu/drm/i915/i915_request.c 	return i915_sw_fence_await_dma_fence(&rq->submit,
submit            845 drivers/gpu/drm/i915/i915_request.c 		return i915_sw_fence_await_dma_fence(&to->submit,
submit            898 drivers/gpu/drm/i915/i915_request.c 		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
submit            909 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
submit            910 drivers/gpu/drm/i915/i915_request.c 						       &from->submit,
submit            916 drivers/gpu/drm/i915/i915_request.c 		ret = i915_sw_fence_await_dma_fence(&to->submit,
submit            978 drivers/gpu/drm/i915/i915_request.c 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
submit           1028 drivers/gpu/drm/i915/i915_request.c 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
submit           1155 drivers/gpu/drm/i915/i915_request.c 			i915_sw_fence_await_sw_fence(&rq->submit,
submit           1156 drivers/gpu/drm/i915/i915_request.c 						     &prev->submit,
submit           1159 drivers/gpu/drm/i915/i915_request.c 			__i915_sw_fence_await_dma_fence(&rq->submit,
submit           1235 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_commit(&rq->submit);
submit            144 drivers/gpu/drm/i915/i915_request.h 	struct i915_sw_fence submit;
submit             80 drivers/gpu/drm/i915/selftests/i915_active.c 	struct i915_sw_fence *submit;
submit             90 drivers/gpu/drm/i915/selftests/i915_active.c 	submit = heap_fence_create(GFP_KERNEL);
submit             91 drivers/gpu/drm/i915/selftests/i915_active.c 	if (!submit) {
submit            109 drivers/gpu/drm/i915/selftests/i915_active.c 		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
submit            110 drivers/gpu/drm/i915/selftests/i915_active.c 						       submit,
submit            135 drivers/gpu/drm/i915/selftests/i915_active.c 	i915_sw_fence_commit(submit);
submit            136 drivers/gpu/drm/i915/selftests/i915_active.c 	heap_fence_put(submit);
submit            490 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
submit            314 drivers/gpu/drm/i915/selftests/i915_request.c 		struct i915_sw_fence *submit, *wait;
submit            317 drivers/gpu/drm/i915/selftests/i915_request.c 		submit = heap_fence_create(GFP_KERNEL);
submit            318 drivers/gpu/drm/i915/selftests/i915_request.c 		if (!submit) {
submit            325 drivers/gpu/drm/i915/selftests/i915_request.c 			i915_sw_fence_commit(submit);
submit            326 drivers/gpu/drm/i915/selftests/i915_request.c 			heap_fence_put(submit);
submit            353 drivers/gpu/drm/i915/selftests/i915_request.c 			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
submit            354 drivers/gpu/drm/i915/selftests/i915_request.c 							       submit,
submit            375 drivers/gpu/drm/i915/selftests/i915_request.c 		i915_sw_fence_commit(submit);
submit            409 drivers/gpu/drm/i915/selftests/i915_request.c 		heap_fence_put(submit);
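
Note: in i915, submit is the i915_sw_fence embedded in every request (i915_request.h above); a request only reaches the hardware once that fence signals, so dependencies are expressed by making one submit fence await another. The selftests above gate a batch of requests behind a single heap fence; condensed to the essential calls (i915-internal API as used in selftests/i915_request.c, not a standalone program):

    struct i915_sw_fence *submit;
    int err;

    submit = heap_fence_create(GFP_KERNEL);
    if (!submit)
    	return -ENOMEM;

    /* rq cannot reach the hardware until "submit" is committed */
    err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, submit, GFP_KERNEL);

    /* ... queue the request, build more requests behind the same gate ... */

    i915_sw_fence_commit(submit);	/* release everything at once */
    heap_fence_put(submit);
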
submit             97 drivers/gpu/drm/lima/lima_drv.c 	struct lima_submit submit = {0};
submit            111 drivers/gpu/drm/lima/lima_drv.c 	bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
submit            115 drivers/gpu/drm/lima/lima_drv.c 	size = args->nr_bos * sizeof(*submit.bos);
submit            143 drivers/gpu/drm/lima/lima_drv.c 	submit.pipe = args->pipe;
submit            144 drivers/gpu/drm/lima/lima_drv.c 	submit.bos = bos;
submit            145 drivers/gpu/drm/lima/lima_drv.c 	submit.lbos = (void *)bos + size;
submit            146 drivers/gpu/drm/lima/lima_drv.c 	submit.nr_bos = args->nr_bos;
submit            147 drivers/gpu/drm/lima/lima_drv.c 	submit.task = task;
submit            148 drivers/gpu/drm/lima/lima_drv.c 	submit.ctx = ctx;
submit            149 drivers/gpu/drm/lima/lima_drv.c 	submit.flags = args->flags;
submit            150 drivers/gpu/drm/lima/lima_drv.c 	submit.in_sync[0] = args->in_sync[0];
submit            151 drivers/gpu/drm/lima/lima_drv.c 	submit.in_sync[1] = args->in_sync[1];
submit            152 drivers/gpu/drm/lima/lima_drv.c 	submit.out_sync = args->out_sync;
submit            154 drivers/gpu/drm/lima/lima_drv.c 	err = lima_gem_submit(file, &submit);
submit            206 drivers/gpu/drm/lima/lima_gem.c static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
submit            210 drivers/gpu/drm/lima/lima_gem.c 	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
submit            213 drivers/gpu/drm/lima/lima_gem.c 		if (!submit->in_sync[i])
submit            216 drivers/gpu/drm/lima/lima_gem.c 		err = drm_syncobj_find_fence(file, submit->in_sync[i],
submit            221 drivers/gpu/drm/lima/lima_gem.c 		err = drm_gem_fence_array_add(&submit->task->deps, fence);
submit            231 drivers/gpu/drm/lima/lima_gem.c int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
submit            239 drivers/gpu/drm/lima/lima_gem.c 	struct lima_bo **bos = submit->lbos;
submit            241 drivers/gpu/drm/lima/lima_gem.c 	if (submit->out_sync) {
submit            242 drivers/gpu/drm/lima/lima_gem.c 		out_sync = drm_syncobj_find(file, submit->out_sync);
submit            247 drivers/gpu/drm/lima/lima_gem.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            251 drivers/gpu/drm/lima/lima_gem.c 		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
submit            271 drivers/gpu/drm/lima/lima_gem.c 	err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
submit            276 drivers/gpu/drm/lima/lima_gem.c 		submit->task, submit->ctx->context + submit->pipe,
submit            277 drivers/gpu/drm/lima/lima_gem.c 		bos, submit->nr_bos, vm);
submit            281 drivers/gpu/drm/lima/lima_gem.c 	err = lima_gem_add_deps(file, submit);
submit            285 drivers/gpu/drm/lima/lima_gem.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            287 drivers/gpu/drm/lima/lima_gem.c 			submit->task, bos[i],
submit            288 drivers/gpu/drm/lima/lima_gem.c 			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
submit            289 drivers/gpu/drm/lima/lima_gem.c 			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
submit            295 drivers/gpu/drm/lima/lima_gem.c 		submit->ctx->context + submit->pipe, submit->task);
submit            297 drivers/gpu/drm/lima/lima_gem.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            298 drivers/gpu/drm/lima/lima_gem.c 		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
submit            304 drivers/gpu/drm/lima/lima_gem.c 	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
submit            306 drivers/gpu/drm/lima/lima_gem.c 	for (i = 0; i < submit->nr_bos; i++)
submit            319 drivers/gpu/drm/lima/lima_gem.c 	lima_sched_task_fini(submit->task);
submit            321 drivers/gpu/drm/lima/lima_gem.c 	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
submit            323 drivers/gpu/drm/lima/lima_gem.c 	for (i = 0; i < submit->nr_bos; i++) {
submit             20 drivers/gpu/drm/lima/lima_gem.h int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
submit            422 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 		.submit = adreno_submit,
submit            441 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 		.submit = adreno_submit,
submit            531 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 		.submit = adreno_submit,
submit             46 drivers/gpu/drm/msm/adreno/a5xx_gpu.c static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit             50 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
submit             55 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	for (i = 0; i < submit->nr_cmds; i++) {
submit             56 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		switch (submit->cmd[i].type) {
submit             65 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			obj = submit->bos[submit->cmd[i].idx].obj;
submit             66 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			dwords = submit->cmd[i].size;
submit            102 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	ring->memptrs->fence = submit->seqno;
submit            106 drivers/gpu/drm/msm/adreno/a5xx_gpu.c static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit            112 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
submit            115 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
submit            117 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		a5xx_submit_in_rb(gpu, submit, ctx);
submit            130 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
submit            131 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
submit            146 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	for (i = 0; i < submit->nr_cmds; i++) {
submit            147 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		switch (submit->cmd[i].type) {
submit            156 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
submit            157 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
submit            158 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, submit->cmd[i].size);
submit            182 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, submit->seqno);
submit            192 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, submit->seqno);
submit           1383 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		.submit = a5xx_submit,
submit             82 drivers/gpu/drm/msm/adreno/a6xx_gpu.c static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit             85 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
submit             89 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
submit            111 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	for (i = 0; i < submit->nr_cmds; i++) {
submit            112 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		switch (submit->cmd[i].type) {
submit            121 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
submit            122 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
submit            123 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, submit->cmd[i].size);
submit            135 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, submit->seqno);
submit            145 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, submit->seqno);
submit            147 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	trace_msm_gpu_submit_flush(submit,
submit            828 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		.submit = a6xx_submit,
submit            414 drivers/gpu/drm/msm/adreno/adreno_gpu.c void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit            419 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
submit            422 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	for (i = 0; i < submit->nr_cmds; i++) {
submit            423 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		switch (submit->cmd[i].type) {
submit            435 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
submit            436 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_RING(ring, submit->cmd[i].size);
submit            443 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	OUT_RING(ring, submit->seqno);
submit            463 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, submit->seqno);
submit            469 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, submit->seqno);
submit            226 drivers/gpu/drm/msm/adreno/adreno_gpu.h void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
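The a5xx/a6xx submit paths above emit the same indirect-buffer triple onto the ring: low address half, high half, then the length in dwords (the older adreno_submit() variant emits only a 32-bit address and size). A toy userspace sketch of that dword stream, with out_ring() standing in for the driver's OUT_RING():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_DWORDS 64

    /* Toy ring buffer; the real one lives in msm_ringbuffer.h. */
    struct toy_ring { uint32_t buf[RING_DWORDS]; unsigned int wptr; };

    static void out_ring(struct toy_ring *ring, uint32_t data)
    {
            ring->buf[ring->wptr++ % RING_DWORDS] = data;
    }

    int main(void)
    {
            struct toy_ring ring = { .wptr = 0 };
            uint64_t iova = 0x100001000ull; /* hypothetical cmdstream address */
            uint32_t dwords = 32;           /* cmdstream length in dwords */

            out_ring(&ring, (uint32_t)(iova & 0xffffffff)); /* lower_32_bits() */
            out_ring(&ring, (uint32_t)(iova >> 32));        /* upper_32_bits() */
            out_ring(&ring, dwords);

            printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
                   ring.buf[0], ring.buf[1], ring.buf[2]);
            return 0;
    }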
submit            262 drivers/gpu/drm/msm/msm_drv.h void msm_gem_submit_free(struct msm_gem_submit *submit);
submit            391 drivers/gpu/drm/msm/msm_drv.h void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
submit            398 drivers/gpu/drm/msm/msm_drv.h static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
submit             32 drivers/gpu/drm/msm/msm_gem_submit.c 	struct msm_gem_submit *submit;
submit             33 drivers/gpu/drm/msm/msm_gem_submit.c 	uint64_t sz = struct_size(submit, bos, nr_bos) +
submit             34 drivers/gpu/drm/msm/msm_gem_submit.c 				  ((u64)nr_cmds * sizeof(submit->cmd[0]));
submit             39 drivers/gpu/drm/msm/msm_gem_submit.c 	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
submit             40 drivers/gpu/drm/msm/msm_gem_submit.c 	if (!submit)
submit             43 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->dev = dev;
submit             44 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->aspace = aspace;
submit             45 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->gpu = gpu;
submit             46 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->fence = NULL;
submit             47 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->cmd = (void *)&submit->bos[nr_bos];
submit             48 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->queue = queue;
submit             49 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->ring = gpu->rb[queue->prio];
submit             52 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->nr_bos = 0;
submit             53 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->nr_cmds = 0;
submit             55 drivers/gpu/drm/msm/msm_gem_submit.c 	INIT_LIST_HEAD(&submit->node);
submit             56 drivers/gpu/drm/msm/msm_gem_submit.c 	INIT_LIST_HEAD(&submit->bo_list);
submit             57 drivers/gpu/drm/msm/msm_gem_submit.c 	ww_acquire_init(&submit->ticket, &reservation_ww_class);
submit             59 drivers/gpu/drm/msm/msm_gem_submit.c 	return submit;
submit             62 drivers/gpu/drm/msm/msm_gem_submit.c void msm_gem_submit_free(struct msm_gem_submit *submit)
submit             64 drivers/gpu/drm/msm/msm_gem_submit.c 	dma_fence_put(submit->fence);
submit             65 drivers/gpu/drm/msm/msm_gem_submit.c 	list_del(&submit->node);
submit             66 drivers/gpu/drm/msm/msm_gem_submit.c 	put_pid(submit->pid);
submit             67 drivers/gpu/drm/msm/msm_gem_submit.c 	msm_submitqueue_put(submit->queue);
submit             69 drivers/gpu/drm/msm/msm_gem_submit.c 	kfree(submit);
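submit_create() above makes one allocation serve three things: the struct itself, the trailing bos[] array sized via struct_size(), and the cmd[] array placed immediately after it. A runnable userspace sketch of the same layout, with simplified stand-in types:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins; the real layout is in msm_gem.h. */
    struct stub_bo_entry { uint32_t handle, flags; uint64_t iova; };
    struct stub_cmd { uint32_t type, size; uint64_t iova; };

    struct stub_submit {
            unsigned int nr_bos, nr_cmds;
            struct stub_cmd *cmd;
            struct stub_bo_entry bos[]; /* trailing array, as in msm */
    };

    int main(void)
    {
            unsigned int nr_bos = 3, nr_cmds = 2;
            /* struct_size(submit, bos, nr_bos) + nr_cmds * sizeof(cmd[0]) */
            size_t sz = sizeof(struct stub_submit)
                      + nr_bos * sizeof(struct stub_bo_entry)
                      + nr_cmds * sizeof(struct stub_cmd);
            struct stub_submit *submit = calloc(1, sz);

            if (!submit)
                    return 1;

            /* cmd[] lives right after bos[nr_bos]; one free releases all. */
            submit->cmd = (void *)&submit->bos[nr_bos];
            submit->nr_bos = nr_bos;
            submit->nr_cmds = nr_cmds;
            printf("%zu bytes total, cmd[] at offset %td\n", sz,
                   (char *)submit->cmd - (char *)submit);
            free(submit);
            return 0;
    }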
submit             72 drivers/gpu/drm/msm/msm_gem_submit.c static int submit_lookup_objects(struct msm_gem_submit *submit,
submit             86 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].flags = 0;
submit            105 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].handle = submit_bo.handle;
submit            106 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].flags = submit_bo.flags;
submit            108 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].iova  = submit_bo.presumed;
submit            120 drivers/gpu/drm/msm/msm_gem_submit.c 		obj = idr_find(&file->object_idr, submit->bos[i].handle);
submit            122 drivers/gpu/drm/msm/msm_gem_submit.c 			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
submit            131 drivers/gpu/drm/msm/msm_gem_submit.c 					submit->bos[i].handle, i);
submit            138 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].obj = msm_obj;
submit            140 drivers/gpu/drm/msm/msm_gem_submit.c 		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
submit            147 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->nr_bos = i;
submit            152 drivers/gpu/drm/msm/msm_gem_submit.c static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
submit            155 drivers/gpu/drm/msm/msm_gem_submit.c 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            157 drivers/gpu/drm/msm/msm_gem_submit.c 	if (submit->bos[i].flags & BO_PINNED)
submit            158 drivers/gpu/drm/msm/msm_gem_submit.c 		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
submit            160 drivers/gpu/drm/msm/msm_gem_submit.c 	if (submit->bos[i].flags & BO_LOCKED)
submit            163 drivers/gpu/drm/msm/msm_gem_submit.c 	if (backoff && !(submit->bos[i].flags & BO_VALID))
submit            164 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].iova = 0;
submit            166 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
submit            170 drivers/gpu/drm/msm/msm_gem_submit.c static int submit_lock_objects(struct msm_gem_submit *submit)
submit            175 drivers/gpu/drm/msm/msm_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            176 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            183 drivers/gpu/drm/msm/msm_gem_submit.c 		if (!(submit->bos[i].flags & BO_LOCKED)) {
submit            185 drivers/gpu/drm/msm/msm_gem_submit.c 					&submit->ticket);
submit            188 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->bos[i].flags |= BO_LOCKED;
submit            192 drivers/gpu/drm/msm/msm_gem_submit.c 	ww_acquire_done(&submit->ticket);
submit            198 drivers/gpu/drm/msm/msm_gem_submit.c 		submit_unlock_unpin_bo(submit, i, true);
submit            201 drivers/gpu/drm/msm/msm_gem_submit.c 		submit_unlock_unpin_bo(submit, slow_locked, true);
submit            204 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
submit            207 drivers/gpu/drm/msm/msm_gem_submit.c 				&submit->ticket);
submit            209 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->bos[contended].flags |= BO_LOCKED;
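submit_lock_objects() above is the canonical ww_mutex deadlock-backoff loop. A kernel-style sketch of its shape over a hypothetical plain array of ww_mutexes (the driver locks reservation objects instead and also tracks BO_LOCKED flags):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(demo_ww_class);

    static int lock_all(struct ww_mutex **locks, int n)
    {
            struct ww_acquire_ctx ticket;
            int i, contended, slow = -1, ret;

            ww_acquire_init(&ticket, &demo_ww_class);
    retry:
            for (i = 0; i < n; i++) {
                    if (i == slow) /* already held via the slow path */
                            continue;
                    ret = ww_mutex_lock_interruptible(locks[i], &ticket);
                    if (ret)
                            goto fail;
            }
            ww_acquire_done(&ticket);
            return 0;

    fail:
            contended = i;
            while (--i >= 0)
                    if (i != slow)
                            ww_mutex_unlock(locks[i]);
            if (slow >= 0)
                    ww_mutex_unlock(locks[slow]);
            slow = -1;

            if (ret == -EDEADLK) {
                    /* Sleep until the contended lock is free, then retry
                     * the whole pass with that index already held. */
                    ret = ww_mutex_lock_slow_interruptible(locks[contended],
                                                           &ticket);
                    if (!ret) {
                            slow = contended;
                            goto retry;
                    }
            }
            ww_acquire_fini(&ticket);
            return ret;
    }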
submit            218 drivers/gpu/drm/msm/msm_gem_submit.c static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
submit            222 drivers/gpu/drm/msm/msm_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            223 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            224 drivers/gpu/drm/msm/msm_gem_submit.c 		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
submit            241 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
submit            250 drivers/gpu/drm/msm/msm_gem_submit.c static int submit_pin_objects(struct msm_gem_submit *submit)
submit            254 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->valid = true;
submit            256 drivers/gpu/drm/msm/msm_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            257 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            262 drivers/gpu/drm/msm/msm_gem_submit.c 				submit->aspace, &iova);
submit            267 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].flags |= BO_PINNED;
submit            269 drivers/gpu/drm/msm/msm_gem_submit.c 		if (iova == submit->bos[i].iova) {
submit            270 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->bos[i].flags |= BO_VALID;
submit            272 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->bos[i].iova = iova;
submit            274 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->bos[i].flags &= ~BO_VALID;
submit            275 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->valid = false;
submit            282 drivers/gpu/drm/msm/msm_gem_submit.c static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
submit            285 drivers/gpu/drm/msm/msm_gem_submit.c 	if (idx >= submit->nr_bos) {
submit            287 drivers/gpu/drm/msm/msm_gem_submit.c 				idx, submit->nr_bos);
submit            292 drivers/gpu/drm/msm/msm_gem_submit.c 		*obj = submit->bos[idx].obj;
submit            294 drivers/gpu/drm/msm/msm_gem_submit.c 		*iova = submit->bos[idx].iova;
submit            296 drivers/gpu/drm/msm/msm_gem_submit.c 		*valid = !!(submit->bos[idx].flags & BO_VALID);
submit            302 drivers/gpu/drm/msm/msm_gem_submit.c static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
submit            358 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
submit            383 drivers/gpu/drm/msm/msm_gem_submit.c static void submit_cleanup(struct msm_gem_submit *submit)
submit            387 drivers/gpu/drm/msm/msm_gem_submit.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            388 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            389 drivers/gpu/drm/msm/msm_gem_submit.c 		submit_unlock_unpin_bo(submit, i, false);
submit            394 drivers/gpu/drm/msm/msm_gem_submit.c 	ww_acquire_fini(&submit->ticket);
submit            404 drivers/gpu/drm/msm/msm_gem_submit.c 	struct msm_gem_submit *submit;
submit            475 drivers/gpu/drm/msm/msm_gem_submit.c 	submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
submit            477 drivers/gpu/drm/msm/msm_gem_submit.c 	if (!submit) {
submit            482 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->pid = pid;
submit            483 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->ident = submitid;
submit            486 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->in_rb = true;
submit            488 drivers/gpu/drm/msm/msm_gem_submit.c 	ret = submit_lookup_objects(submit, args, file);
submit            492 drivers/gpu/drm/msm/msm_gem_submit.c 	ret = submit_lock_objects(submit);
submit            496 drivers/gpu/drm/msm/msm_gem_submit.c 	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
submit            500 drivers/gpu/drm/msm/msm_gem_submit.c 	ret = submit_pin_objects(submit);
submit            529 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = submit_bo(submit, submit_cmd.submit_idx,
submit            549 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->cmd[i].type = submit_cmd.type;
submit            550 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->cmd[i].size = submit_cmd.size / 4;
submit            551 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
submit            552 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->cmd[i].idx  = submit_cmd.submit_idx;
submit            554 drivers/gpu/drm/msm/msm_gem_submit.c 		if (submit->valid)
submit            557 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
submit            563 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->nr_cmds = i;
submit            565 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->fence = msm_fence_alloc(ring->fctx);
submit            566 drivers/gpu/drm/msm/msm_gem_submit.c 	if (IS_ERR(submit->fence)) {
submit            567 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = PTR_ERR(submit->fence);
submit            568 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->fence = NULL;
submit            573 drivers/gpu/drm/msm/msm_gem_submit.c 		sync_file = sync_file_create(submit->fence);
submit            580 drivers/gpu/drm/msm/msm_gem_submit.c 	msm_gpu_submit(gpu, submit, ctx);
submit            582 drivers/gpu/drm/msm/msm_gem_submit.c 	args->fence = submit->fence->seqno;
submit            590 drivers/gpu/drm/msm/msm_gem_submit.c 	submit_cleanup(submit);
submit            592 drivers/gpu/drm/msm/msm_gem_submit.c 		msm_gem_submit_free(submit);
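When the caller asks for an out-fence, the tail of the ioctl above wraps submit->fence in a sync_file. A hedged sketch of that export step (error unwinding of the submit itself is elided):

    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/sync_file.h>

    static int export_submit_fence(struct dma_fence *fence)
    {
            struct sync_file *sync_file = sync_file_create(fence);
            int fd;

            if (!sync_file)
                    return -ENOMEM;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0) {
                    fput(sync_file->file); /* drops the sync_file */
                    return fd;
            }
            fd_install(fd, sync_file->file);
            return fd; /* handed back to userspace */
    }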
submit            337 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_gem_submit *submit, char *comm, char *cmd)
submit            357 drivers/gpu/drm/msm/msm_gpu.c 	if (submit) {
submit            360 drivers/gpu/drm/msm/msm_gpu.c 		state->bos = kcalloc(submit->nr_cmds,
submit            363 drivers/gpu/drm/msm/msm_gpu.c 		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
submit            364 drivers/gpu/drm/msm/msm_gpu.c 			int idx = submit->cmd[i].idx;
submit            366 drivers/gpu/drm/msm/msm_gpu.c 			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
submit            367 drivers/gpu/drm/msm/msm_gpu.c 				submit->bos[idx].iova, submit->bos[idx].flags);
submit            380 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_gem_submit *submit, char *comm, char *cmd)
submit            392 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_gem_submit *submit;
submit            394 drivers/gpu/drm/msm/msm_gpu.c 	list_for_each_entry(submit, &ring->submits, node) {
submit            395 drivers/gpu/drm/msm/msm_gpu.c 		if (submit->seqno > fence)
submit            398 drivers/gpu/drm/msm/msm_gpu.c 		msm_update_fence(submit->ring->fctx,
submit            399 drivers/gpu/drm/msm/msm_gpu.c 			submit->fence->seqno);
submit            406 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_gem_submit *submit;
submit            410 drivers/gpu/drm/msm/msm_gpu.c 	list_for_each_entry(submit, &ring->submits, node)
submit            411 drivers/gpu/drm/msm/msm_gpu.c 		if (submit->seqno == fence)
submit            412 drivers/gpu/drm/msm/msm_gpu.c 			return submit;
submit            424 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_gem_submit *submit;
submit            433 drivers/gpu/drm/msm/msm_gpu.c 	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
submit            434 drivers/gpu/drm/msm/msm_gpu.c 	if (submit) {
submit            439 drivers/gpu/drm/msm/msm_gpu.c 		submit->queue->faults++;
submit            441 drivers/gpu/drm/msm/msm_gpu.c 		task = get_pid_task(submit->pid, PIDTYPE_PID);
submit            452 drivers/gpu/drm/msm/msm_gpu.c 			msm_rd_dump_submit(priv->hangrd, submit,
submit            455 drivers/gpu/drm/msm/msm_gpu.c 			msm_rd_dump_submit(priv->hangrd, submit, NULL);
submit            460 drivers/gpu/drm/msm/msm_gpu.c 	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
submit            501 drivers/gpu/drm/msm/msm_gpu.c 			list_for_each_entry(submit, &ring->submits, node)
submit            502 drivers/gpu/drm/msm/msm_gpu.c 				gpu->funcs->submit(gpu, submit, NULL);
submit            653 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_gem_submit *submit)
submit            655 drivers/gpu/drm/msm/msm_gpu.c 	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
submit            671 drivers/gpu/drm/msm/msm_gpu.c 	trace_msm_gpu_submit_retired(submit, elapsed, clock,
submit            674 drivers/gpu/drm/msm/msm_gpu.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            675 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            678 drivers/gpu/drm/msm/msm_gpu.c 		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
submit            684 drivers/gpu/drm/msm/msm_gpu.c 	msm_gem_submit_free(submit);
submit            690 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_gem_submit *submit, *tmp;
submit            699 drivers/gpu/drm/msm/msm_gpu.c 		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
submit            700 drivers/gpu/drm/msm/msm_gpu.c 			if (dma_fence_is_signaled(submit->fence))
submit            701 drivers/gpu/drm/msm/msm_gpu.c 				retire_submit(gpu, ring, submit);
submit            729 drivers/gpu/drm/msm/msm_gpu.c void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit            734 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
submit            743 drivers/gpu/drm/msm/msm_gpu.c 	submit->seqno = ++ring->seqno;
submit            745 drivers/gpu/drm/msm/msm_gpu.c 	list_add_tail(&submit->node, &ring->submits);
submit            747 drivers/gpu/drm/msm/msm_gpu.c 	msm_rd_dump_submit(priv->rd, submit, NULL);
submit            751 drivers/gpu/drm/msm/msm_gpu.c 	for (i = 0; i < submit->nr_bos; i++) {
submit            752 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit            762 drivers/gpu/drm/msm/msm_gpu.c 		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
submit            764 drivers/gpu/drm/msm/msm_gpu.c 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
submit            765 drivers/gpu/drm/msm/msm_gpu.c 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
submit            766 drivers/gpu/drm/msm/msm_gpu.c 		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
submit            767 drivers/gpu/drm/msm/msm_gpu.c 			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
submit            770 drivers/gpu/drm/msm/msm_gpu.c 	gpu->funcs->submit(gpu, submit, ctx);
submit             48 drivers/gpu/drm/msm/msm_gpu.h 	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit            275 drivers/gpu/drm/msm/msm_gpu.h void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit             34 drivers/gpu/drm/msm/msm_gpu_trace.h 	    TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
submit             35 drivers/gpu/drm/msm/msm_gpu_trace.h 	    TP_ARGS(submit, ticks),
submit             44 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->pid = pid_nr(submit->pid);
submit             45 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->id = submit->ident;
submit             46 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->ringid = submit->ring->id;
submit             47 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->seqno = submit->seqno;
submit             57 drivers/gpu/drm/msm/msm_gpu_trace.h 	    TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
submit             59 drivers/gpu/drm/msm/msm_gpu_trace.h 	    TP_ARGS(submit, elapsed, clock, start, end),
submit             71 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->pid = pid_nr(submit->pid);
submit             72 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->id = submit->ident;
submit             73 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->ringid = submit->ring->id;
submit             74 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->seqno = submit->seqno;
submit             86 drivers/gpu/drm/msm/msm_rd.c 	struct msm_gem_submit *submit;
submit            300 drivers/gpu/drm/msm/msm_rd.c 		struct msm_gem_submit *submit, int idx,
submit            303 drivers/gpu/drm/msm/msm_rd.c 	struct msm_gem_object *obj = submit->bos[idx].obj;
submit            308 drivers/gpu/drm/msm/msm_rd.c 		offset = iova - submit->bos[idx].iova;
submit            310 drivers/gpu/drm/msm/msm_rd.c 		iova = submit->bos[idx].iova;
submit            322 drivers/gpu/drm/msm/msm_rd.c 	if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
submit            337 drivers/gpu/drm/msm/msm_rd.c should_dump(struct msm_gem_submit *submit, int idx)
submit            339 drivers/gpu/drm/msm/msm_rd.c 	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
submit            343 drivers/gpu/drm/msm/msm_rd.c void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
submit            346 drivers/gpu/drm/msm/msm_rd.c 	struct drm_device *dev = submit->dev;
submit            370 drivers/gpu/drm/msm/msm_rd.c 	task = pid_task(submit->pid, PIDTYPE_PID);
submit            374 drivers/gpu/drm/msm/msm_rd.c 				pid_nr(submit->pid), submit->seqno);
submit            377 drivers/gpu/drm/msm/msm_rd.c 				pid_nr(submit->pid), submit->seqno);
submit            383 drivers/gpu/drm/msm/msm_rd.c 	for (i = 0; i < submit->nr_bos; i++)
submit            384 drivers/gpu/drm/msm/msm_rd.c 		if (should_dump(submit, i))
submit            385 drivers/gpu/drm/msm/msm_rd.c 			snapshot_buf(rd, submit, i, 0, 0);
submit            387 drivers/gpu/drm/msm/msm_rd.c 	for (i = 0; i < submit->nr_cmds; i++) {
submit            388 drivers/gpu/drm/msm/msm_rd.c 		uint64_t iova = submit->cmd[i].iova;
submit            389 drivers/gpu/drm/msm/msm_rd.c 		uint32_t szd  = submit->cmd[i].size; /* in dwords */
submit            392 drivers/gpu/drm/msm/msm_rd.c 		if (!should_dump(submit, i)) {
submit            393 drivers/gpu/drm/msm/msm_rd.c 			snapshot_buf(rd, submit, submit->cmd[i].idx,
submit            394 drivers/gpu/drm/msm/msm_rd.c 					submit->cmd[i].iova, szd * 4);
submit            397 drivers/gpu/drm/msm/msm_rd.c 		switch (submit->cmd[i].type) {
submit            538 drivers/gpu/drm/panfrost/panfrost_drv.c 	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW | DRM_AUTH),
submit            710 drivers/gpu/drm/tegra/drm.c 	err = context->client->ops->submit(context, args, drm, file);
submit             75 drivers/gpu/drm/tegra/drm.h 	int (*submit)(struct tegra_drm_context *context,
submit            155 drivers/gpu/drm/tegra/gr2d.c 	.submit = tegra_drm_submit,
submit            155 drivers/gpu/drm/tegra/gr3d.c 	.submit = tegra_drm_submit,
submit            326 drivers/gpu/drm/tegra/vic.c 	.submit = tegra_drm_submit,
submit            194 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	bool submit = is_tmu_submit(waddr);
submit            195 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
submit            275 drivers/gpu/drm/vc4/vc4_validate_shaders.c 	if (submit) {
submit             33 drivers/gpu/host1x/dev.h 	int (*submit)(struct host1x_job *job);
submit            239 drivers/gpu/host1x/dev.h 	return host->channel_op->submit(job);
submit            244 drivers/gpu/host1x/hw/channel_hw.c 	.submit = channel_submit,
submit            375 drivers/iio/buffer/industrialio-buffer-dma.c 	ret = queue->ops->submit(queue, block);
submit            124 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	.submit = iio_dmaengine_buffer_submit_block,
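The dmaengine glue above is one provider of the queue->ops->submit hook. A sketch of wiring a driver-side implementation; the body is a hypothetical placeholder for starting the hardware transfer:

    #include <linux/iio/buffer-dma.h>

    static int my_submit_block(struct iio_dma_buffer_queue *queue,
                               struct iio_dma_buffer_block *block)
    {
            /*
             * Hand 'block' to the hardware here (driver-specific);
             * signal completion later with iio_dma_buffer_block_done().
             */
            return 0;
    }

    static const struct iio_dma_buffer_ops my_dma_buffer_ops = {
            .submit = my_submit_block,
            /* .abort left unset here; the dmaengine glue provides one. */
    };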
submit            785 drivers/md/dm-integrity.c 	struct async_submit_ctl submit;
submit            802 drivers/md/dm-integrity.c 	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
submit            827 drivers/md/dm-integrity.c 		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
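The dm-integrity journal code above chains XORs through an on-stack async_submit_ctl. A hedged sketch of the same pattern: XOR two source pages into a zeroed destination, ordered after 'prev', with a completion fired from the callback (all parameters hypothetical):

    #include <linux/async_tx.h>
    #include <linux/completion.h>

    static void xor_done(void *param)
    {
            complete(param); /* struct completion *, by convention */
    }

    static struct dma_async_tx_descriptor *
    xor_two_pages(struct page *dest, struct page **srcs, size_t len,
                  struct dma_async_tx_descriptor *prev,
                  struct completion *comp)
    {
            struct async_submit_ctl submit;

            /* ASYNC_TX_XOR_ZERO_DST: destination is treated as zeroed. */
            init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, prev,
                              xor_done, comp, NULL);
            return async_xor(dest, srcs, 0, 2, len, &submit);
    }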
submit            164 drivers/md/raid5-ppl.c 	struct async_submit_ctl submit;
submit            192 drivers/md/raid5-ppl.c 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
submit            197 drivers/md/raid5-ppl.c 				  &submit);
submit            200 drivers/md/raid5-ppl.c 			       &submit);
submit            718 drivers/md/raid5-ppl.c 	struct async_submit_ctl submit;
submit            722 drivers/md/raid5-ppl.c 	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
submit            724 drivers/md/raid5-ppl.c 	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
submit           1236 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1246 drivers/md/raid5.c 	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
submit           1275 drivers/md/raid5.c 						  b_offset, clen, &submit);
submit           1278 drivers/md/raid5.c 						  page_offset, clen, &submit);
submit           1281 drivers/md/raid5.c 		submit.depend_tx = tx;
submit           1331 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1356 drivers/md/raid5.c 	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
submit           1357 drivers/md/raid5.c 	async_trigger_callback(&submit);
submit           1414 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1429 drivers/md/raid5.c 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
submit           1432 drivers/md/raid5.c 		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
submit           1434 drivers/md/raid5.c 		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
submit           1494 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1522 drivers/md/raid5.c 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
submit           1525 drivers/md/raid5.c 		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
submit           1535 drivers/md/raid5.c 		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
submit           1538 drivers/md/raid5.c 		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
submit           1557 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1597 drivers/md/raid5.c 			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
submit           1601 drivers/md/raid5.c 						  STRIPE_SIZE, &submit);
submit           1620 drivers/md/raid5.c 			init_async_submit(&submit,
submit           1625 drivers/md/raid5.c 				       &submit);
submit           1628 drivers/md/raid5.c 			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
submit           1632 drivers/md/raid5.c 						  STRIPE_SIZE, &submit);
submit           1635 drivers/md/raid5.c 		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
submit           1642 drivers/md/raid5.c 						       blocks, &submit);
submit           1647 drivers/md/raid5.c 						       blocks, &submit);
submit           1674 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1692 drivers/md/raid5.c 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
submit           1694 drivers/md/raid5.c 	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
submit           1705 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1712 drivers/md/raid5.c 	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
submit           1714 drivers/md/raid5.c 	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
submit           1843 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1904 drivers/md/raid5.c 		init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
submit           1908 drivers/md/raid5.c 		init_async_submit(&submit, flags, tx, NULL, NULL,
submit           1913 drivers/md/raid5.c 		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
submit           1915 drivers/md/raid5.c 		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
submit           1928 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           1970 drivers/md/raid5.c 		init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
submit           1973 drivers/md/raid5.c 		init_async_submit(&submit, 0, tx, NULL, NULL,
submit           1975 drivers/md/raid5.c 	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
submit           2004 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           2021 drivers/md/raid5.c 	init_async_submit(&submit, 0, NULL, NULL, NULL,
submit           2024 drivers/md/raid5.c 			   &sh->ops.zero_sum_result, &submit);
submit           2027 drivers/md/raid5.c 	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
submit           2028 drivers/md/raid5.c 	tx = async_trigger_callback(&submit);
submit           2034 drivers/md/raid5.c 	struct async_submit_ctl submit;
submit           2046 drivers/md/raid5.c 	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
submit           2049 drivers/md/raid5.c 			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
submit           4317 drivers/md/raid5.c 			struct async_submit_ctl submit;
submit           4337 drivers/md/raid5.c 			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
submit           4340 drivers/md/raid5.c 					  &submit);
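The raid5.c ops_run_* calls above all follow one shape: initialize an async_submit_ctl with ordering flags and an optional callback, then hand it to one async_tx operation. A hedged sketch for the P/Q case, where blocks[] carries the data pages followed by P at blocks[disks-2] and Q at blocks[disks-1]:

    #include <linux/async_tx.h>

    static struct dma_async_tx_descriptor *
    compute_pq(struct page **blocks, int disks, size_t len,
               struct dma_async_tx_descriptor *tx)
    {
            /* ASYNC_TX_FENCE orders the syndrome after the prior chain
             * 'tx' (hypothetical), as the ops above do. */
            struct async_submit_ctl submit;

            init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, NULL);
            return async_gen_syndrome(blocks, 0, disks, len, &submit);
    }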
submit            525 drivers/media/pci/cx18/cx18-mailbox.c 	int submit;
submit            569 drivers/media/pci/cx18/cx18-mailbox.c 	submit = epu_cmd_irq(cx, order);
submit            570 drivers/media/pci/cx18/cx18-mailbox.c 	if (submit > 0) {
submit            509 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	char __iomem *submit;
submit            532 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
submit            534 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	myri10ge_pio_copy(submit, &buf, sizeof(buf));
submit            712 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	char __iomem *submit;
submit            778 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
submit            780 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	myri10ge_pio_copy(submit, &buf, sizeof(buf));
submit           2086 drivers/net/usb/r8152.c 			goto submit;
submit           2171 drivers/net/usb/r8152.c submit:
submit            588 drivers/net/wireless/ath/carl9170/carl9170.h int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
submit           1611 drivers/net/wireless/ath/carl9170/tx.c int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
submit           1695 drivers/net/wireless/ath/carl9170/tx.c 	if (submit) {
submit           1584 drivers/scsi/myrb.c 		goto submit;
submit           1626 drivers/scsi/myrb.c submit:
submit           1721 drivers/scsi/myrs.c 		goto submit;
submit           1773 drivers/scsi/myrs.c submit:
submit            354 fs/btrfs/compression.c 		int submit = 0;
submit            359 fs/btrfs/compression.c 			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
submit            363 fs/btrfs/compression.c 		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
submit            634 fs/btrfs/compression.c 		int submit = 0;
submit            641 fs/btrfs/compression.c 			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
submit            645 fs/btrfs/compression.c 		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
submit           8526 fs/btrfs/inode.c 		goto submit;
submit           8582 fs/btrfs/inode.c submit:
submit           1163 fs/f2fs/segment.c 			goto submit;
submit           1169 fs/f2fs/segment.c submit:
submit           1020 fs/gfs2/lock_dlm.c 	uint32_t *submit = NULL;
submit           1044 fs/gfs2/lock_dlm.c 	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
submit           1046 fs/gfs2/lock_dlm.c 	if (!submit || !result) {
submit           1047 fs/gfs2/lock_dlm.c 		kfree(submit);
submit           1053 fs/gfs2/lock_dlm.c 	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
submit           1057 fs/gfs2/lock_dlm.c 	ls->ls_recover_submit = submit;
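The gfs2 lines above grow ls_recover_submit safely: build the larger array first, copy the old slots, and only then publish the new pointer. A trivial userspace sketch of the same grow-and-swap, with a hypothetical stand-in struct:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct recover_state { uint32_t *submit; int size; };

    static int grow_recover(struct recover_state *rs, int new_size)
    {
            uint32_t *submit = calloc(new_size, sizeof(uint32_t));

            if (!submit)
                    return -1;
            /* Old contents survive; the pointer swap is the last step. */
            memcpy(submit, rs->submit, rs->size * sizeof(uint32_t));
            free(rs->submit);
            rs->submit = submit;
            rs->size = new_size;
            return 0;
    }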
submit            313 fs/io_uring.c  	struct sqe_submit	submit;
submit            497 fs/io_uring.c  	if (req->submit.sqe) {
submit            498 fs/io_uring.c  		switch (req->submit.sqe->opcode) {
submit           1893 fs/io_uring.c  	req->submit.sqe = NULL;
submit           2011 fs/io_uring.c  	req->submit.sequence = count;
submit           2031 fs/io_uring.c  		nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
submit           2032 fs/io_uring.c  		tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
submit           2082 fs/io_uring.c  	memcpy(&req->submit, s, sizeof(*s));
submit           2084 fs/io_uring.c  	req->submit.sqe = sqe_copy;
submit           2203 fs/io_uring.c  	async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
submit           2206 fs/io_uring.c  		struct sqe_submit *s = &req->submit;
submit           2430 fs/io_uring.c  			memcpy(&req->submit, s, sizeof(*s));
submit           2586 fs/io_uring.c  		memcpy(&req->submit, s, sizeof(*s));
submit           2591 fs/io_uring.c  		memcpy(&req->submit, s, sizeof(*s));
submit           2705 fs/io_uring.c  			io_queue_link_head(ctx, link, &link->submit, shadow_req);
submit           2736 fs/io_uring.c  		io_queue_link_head(ctx, link, &link->submit, shadow_req);
submit           2881 fs/io_uring.c  	int i, submit = 0;
submit           2899 fs/io_uring.c  			io_queue_link_head(ctx, link, &link->submit, shadow_req);
submit           2920 fs/io_uring.c  		submit++;
submit           2925 fs/io_uring.c  		io_queue_link_head(ctx, link, &link->submit, shadow_req);
submit           2931 fs/io_uring.c  	return submit;
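The req->submit copies above implement io_uring's copy-for-async step: the on-ring sqe must be duplicated into kernel memory before the ring slot can be reused. A sketch consistent with those fragments (error handling varies by call site):

    struct io_uring_sqe *sqe_copy;

    sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
    if (!sqe_copy)
            return -EAGAIN;

    /* Stash the submission state in the request, then point it at
     * the stable copy instead of the ring memory. */
    memcpy(&req->submit, s, sizeof(*s));
    req->submit.sqe = sqe_copy;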
submit             42 fs/iomap/direct-io.c 		} submit;
submit             69 fs/iomap/direct-io.c 	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
submit             70 fs/iomap/direct-io.c 	dio->submit.cookie = submit_bio(bio);
submit            156 fs/iomap/direct-io.c 			struct task_struct *waiter = dio->submit.waiter;
submit            157 fs/iomap/direct-io.c 			WRITE_ONCE(dio->submit.waiter, NULL);
submit            203 fs/iomap/direct-io.c 	unsigned int align = iov_iter_alignment(dio->submit.iter);
submit            242 fs/iomap/direct-io.c 	iter = *dio->submit.iter;
submit            259 fs/iomap/direct-io.c 			iov_iter_revert(dio->submit.iter, copied);
submit            297 fs/iomap/direct-io.c 		iov_iter_advance(dio->submit.iter, n);
submit            329 fs/iomap/direct-io.c 	length = iov_iter_zero(length, dio->submit.iter);
submit            338 fs/iomap/direct-io.c 	struct iov_iter *iter = dio->submit.iter;
submit            426 fs/iomap/direct-io.c 	dio->submit.iter = iter;
submit            427 fs/iomap/direct-io.c 	dio->submit.waiter = current;
submit            428 fs/iomap/direct-io.c 	dio->submit.cookie = BLK_QC_T_NONE;
submit            429 fs/iomap/direct-io.c 	dio->submit.last_queue = NULL;
submit            524 fs/iomap/direct-io.c 	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
submit            525 fs/iomap/direct-io.c 	WRITE_ONCE(iocb->private, dio->submit.last_queue);
submit            549 fs/iomap/direct-io.c 			if (!READ_ONCE(dio->submit.waiter))
submit            553 fs/iomap/direct-io.c 			    !dio->submit.last_queue ||
submit            554 fs/iomap/direct-io.c 			    !blk_poll(dio->submit.last_queue,
submit            555 fs/iomap/direct-io.c 					 dio->submit.cookie, true))
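The direct-io fragments above reassemble into the synchronous wait at the end of iomap_dio_rw(): park the task until the final bio completion clears submit.waiter, spinning in blk_poll() for HIPRI requests instead of sleeping:

    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (!READ_ONCE(dio->submit.waiter))
                    break;

            if (!(iocb->ki_flags & IOCB_HIPRI) ||
                !dio->submit.last_queue ||
                !blk_poll(dio->submit.last_queue,
                          dio->submit.cookie, true))
                    io_schedule();
    }
    __set_current_state(TASK_RUNNING);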
submit            103 include/linux/async_tx.h __async_tx_find_channel(struct async_submit_ctl *submit,
submit            118 include/linux/async_tx.h async_tx_find_channel(struct async_submit_ctl *submit,
submit            133 include/linux/async_tx.h async_tx_sync_epilog(struct async_submit_ctl *submit)
submit            135 include/linux/async_tx.h 	if (submit->cb_fn)
submit            136 include/linux/async_tx.h 		submit->cb_fn(submit->cb_param);
submit            159 include/linux/async_tx.h 		     struct async_submit_ctl *submit);
submit            163 include/linux/async_tx.h 	  int src_cnt, size_t len, struct async_submit_ctl *submit);
submit            168 include/linux/async_tx.h 	      struct async_submit_ctl *submit);
submit            173 include/linux/async_tx.h 	     struct async_submit_ctl *submit);
submit            175 include/linux/async_tx.h struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
submit            179 include/linux/async_tx.h 		   size_t len, struct async_submit_ctl *submit);
submit            184 include/linux/async_tx.h 		   struct async_submit_ctl *submit);
submit            188 include/linux/async_tx.h 			struct page **ptrs, struct async_submit_ctl *submit);
submit            192 include/linux/async_tx.h 			struct page **ptrs, struct async_submit_ctl *submit);
submit            126 include/linux/iio/buffer-dma.h 	int (*submit)(struct iio_dma_buffer_queue *queue,
submit            299 tools/io_uring/io_uring-bench.c submit:
submit            342 tools/io_uring/io_uring-bench.c 					goto submit;
submit            359 tools/io_uring/io_uring-bench.c 					goto submit;
submit            361 tools/io_uring/io_uring-bench.c 				goto submit;
submit             83 tools/io_uring/queue.c 		goto submit;
submit            125 tools/io_uring/queue.c submit: