workload          944 drivers/gpu/drm/amd/powerplay/amd_powerplay.c 	long workload;
workload          964 drivers/gpu/drm/amd/powerplay/amd_powerplay.c 		workload = hwmgr->workload_setting[index];
workload          969 drivers/gpu/drm/amd/powerplay/amd_powerplay.c 		workload = hwmgr->workload_setting[index];
workload          973 drivers/gpu/drm/amd/powerplay/amd_powerplay.c 		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
workload         1612 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 	long workload;
workload         1657 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 		workload = smu->workload_setting[index];
workload         1659 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 		if (smu->power_profile_mode != workload)
workload         1660 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 			smu_set_power_profile_mode(smu, &workload, 0);
workload         1698 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 	long workload;
workload         1713 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 		workload = smu->workload_setting[index];
workload         1718 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 		workload = smu->workload_setting[index];
workload         1722 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 		smu_set_power_profile_mode(smu, &workload, 0);
workload          263 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 	long workload;
workload          286 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 		workload = hwmgr->workload_setting[index];
workload          288 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 		if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
workload          289 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c 			hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
workload           63 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h #define WORKLOAD_MAP(profile, workload) \
workload           64 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 	[profile] = {1, (workload)}
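The amd_powerplay.c, amdgpu_smu.c and pp_psm.c hits above all follow the same pattern: look up the active power-profile index, read the matching entry from workload_setting[], and pass its address to set_power_profile_mode() / smu_set_power_profile_mode() as a one-element long array with size 0; the WORKLOAD_MAP() macro in smu_v11_0.h is the designated-initializer helper that builds the table those profile indices map through. A minimal standalone sketch of that calling convention follows; the pp_ctx struct and dummy_set_mode callback are hypothetical stand-ins, not the real hwmgr/smu types.

    #include <stdio.h>

    /* hypothetical stand-in for hwmgr/smu: a profile table plus a setter callback */
    struct pp_ctx {
        long workload_setting[4];
        int (*set_power_profile_mode)(struct pp_ctx *ctx, long *input, unsigned int size);
    };

    static int dummy_set_mode(struct pp_ctx *ctx, long *input, unsigned int size)
    {
        printf("applying power profile %ld (extra params: %u)\n", input[0], size);
        return 0;
    }

    int main(void)
    {
        struct pp_ctx ctx = {
            .workload_setting = { 0, 1, 2, 3 },
            .set_power_profile_mode = dummy_set_mode,
        };
        int index = 2;      /* index of the currently selected profile */
        long workload = ctx.workload_setting[index];

        /* same shape as the hits above: pointer to the value, size argument 0 */
        return ctx.set_power_profile_mode(&ctx, &workload, 0);
    }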
workload          498 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu_workload *workload;
workload          931 drivers/gpu/drm/i915/gvt/cmd_parser.c 			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
workload         1157 drivers/gpu/drm/i915/gvt/cmd_parser.c 				s->workload->pending_events);
workload         1164 drivers/gpu/drm/i915/gvt/cmd_parser.c 			s->workload->pending_events);
workload         1678 drivers/gpu/drm/i915/gvt/cmd_parser.c 				s->workload->pending_events);
workload         1743 drivers/gpu/drm/i915/gvt/cmd_parser.c 		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
workload         1759 drivers/gpu/drm/i915/gvt/cmd_parser.c 				"ppgtt" : "ggtt", s->ring_id, s->workload);
workload         1771 drivers/gpu/drm/i915/gvt/cmd_parser.c 				"ppgtt" : "ggtt", s->ring_id, s->workload);
workload         1805 drivers/gpu/drm/i915/gvt/cmd_parser.c 			"ppgtt" : "ggtt", s->ring_id, s->workload);
workload         1826 drivers/gpu/drm/i915/gvt/cmd_parser.c 		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
workload         1895 drivers/gpu/drm/i915/gvt/cmd_parser.c 	list_add(&bb->list, &s->workload->shadow_bb);
workload         2673 drivers/gpu/drm/i915/gvt/cmd_parser.c 				"ppgtt" : "ggtt", s->ring_id, s->workload);
workload         2681 drivers/gpu/drm/i915/gvt/cmd_parser.c 			  s->workload, info->name);
workload         2765 drivers/gpu/drm/i915/gvt/cmd_parser.c static int scan_workload(struct intel_vgpu_workload *workload)
workload         2772 drivers/gpu/drm/i915/gvt/cmd_parser.c 	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
workload         2775 drivers/gpu/drm/i915/gvt/cmd_parser.c 	gma_head = workload->rb_start + workload->rb_head;
workload         2776 drivers/gpu/drm/i915/gvt/cmd_parser.c 	gma_tail = workload->rb_start + workload->rb_tail;
workload         2777 drivers/gpu/drm/i915/gvt/cmd_parser.c 	gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
workload         2781 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.vgpu = workload->vgpu;
workload         2782 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.ring_id = workload->ring_id;
workload         2783 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.ring_start = workload->rb_start;
workload         2784 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
workload         2787 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.rb_va = workload->shadow_ring_buffer_va;
workload         2788 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.workload = workload;
workload         2791 drivers/gpu/drm/i915/gvt/cmd_parser.c 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
workload         2799 drivers/gpu/drm/i915/gvt/cmd_parser.c 	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
workload         2800 drivers/gpu/drm/i915/gvt/cmd_parser.c 		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
workload         2812 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
workload         2830 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.vgpu = workload->vgpu;
workload         2831 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.ring_id = workload->ring_id;
workload         2837 drivers/gpu/drm/i915/gvt/cmd_parser.c 	s.workload = workload;
workload         2850 drivers/gpu/drm/i915/gvt/cmd_parser.c static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
workload         2852 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload         2856 drivers/gpu/drm/i915/gvt/cmd_parser.c 	int ring_id = workload->ring_id;
workload         2859 drivers/gpu/drm/i915/gvt/cmd_parser.c 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
workload         2862 drivers/gpu/drm/i915/gvt/cmd_parser.c 	workload->rb_len = (workload->rb_tail + guest_rb_size -
workload         2863 drivers/gpu/drm/i915/gvt/cmd_parser.c 			workload->rb_head) % guest_rb_size;
workload         2865 drivers/gpu/drm/i915/gvt/cmd_parser.c 	gma_head = workload->rb_start + workload->rb_head;
workload         2866 drivers/gpu/drm/i915/gvt/cmd_parser.c 	gma_tail = workload->rb_start + workload->rb_tail;
workload         2867 drivers/gpu/drm/i915/gvt/cmd_parser.c 	gma_top = workload->rb_start + guest_rb_size;
workload         2869 drivers/gpu/drm/i915/gvt/cmd_parser.c 	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
workload         2873 drivers/gpu/drm/i915/gvt/cmd_parser.c 		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
workload         2880 drivers/gpu/drm/i915/gvt/cmd_parser.c 		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
workload         2886 drivers/gpu/drm/i915/gvt/cmd_parser.c 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
workload         2897 drivers/gpu/drm/i915/gvt/cmd_parser.c 		gma_head = workload->rb_start;
workload         2910 drivers/gpu/drm/i915/gvt/cmd_parser.c int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
workload         2913 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload         2915 drivers/gpu/drm/i915/gvt/cmd_parser.c 	ret = shadow_workload_ring_buffer(workload);
workload         2921 drivers/gpu/drm/i915/gvt/cmd_parser.c 	ret = scan_workload(workload);
workload         2933 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
workload         2936 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload         2941 drivers/gpu/drm/i915/gvt/cmd_parser.c 	obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
workload         2963 drivers/gpu/drm/i915/gvt/cmd_parser.c 	ret = copy_gma_to_hva(workload->vgpu,
workload         2964 drivers/gpu/drm/i915/gvt/cmd_parser.c 				workload->vgpu->gtt.ggtt_mm,
workload         3005 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu_workload *workload = container_of(wa_ctx,
workload         3008 drivers/gpu/drm/i915/gvt/cmd_parser.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload           45 drivers/gpu/drm/i915/gvt/cmd_parser.h int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
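The shadow_workload_ring_buffer() hits in cmd_parser.c compute how many guest ring-buffer bytes to copy as (rb_tail + guest_rb_size - rb_head) % guest_rb_size, which is correct whether or not the tail has wrapped past the end of the ring. A small self-contained check of that formula, with made-up example values:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int guest_rb_size = 0x1000;    /* hypothetical ring size */
        unsigned int head, tail, rb_len;

        /* no wrap: tail is ahead of head */
        head = 0x100;
        tail = 0x180;
        rb_len = (tail + guest_rb_size - head) % guest_rb_size;
        assert(rb_len == 0x80);

        /* wrapped: tail has passed the top of the ring and restarted */
        head = 0xf80;
        tail = 0x40;
        rb_len = (tail + guest_rb_size - head) % guest_rb_size;
        assert(rb_len == 0xc0);    /* 0x80 up to the top plus 0x40 from the start */

        printf("wrap-aware length calculation ok\n");
        return 0;
    }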
workload          377 drivers/gpu/drm/i915/gvt/execlist.c static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
workload          379 drivers/gpu/drm/i915/gvt/execlist.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          382 drivers/gpu/drm/i915/gvt/execlist.c 	int ring_id = workload->ring_id;
workload          385 drivers/gpu/drm/i915/gvt/execlist.c 	if (!workload->emulate_schedule_in)
workload          388 drivers/gpu/drm/i915/gvt/execlist.c 	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
workload          389 drivers/gpu/drm/i915/gvt/execlist.c 	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
workload          399 drivers/gpu/drm/i915/gvt/execlist.c static int complete_execlist_workload(struct intel_vgpu_workload *workload)
workload          401 drivers/gpu/drm/i915/gvt/execlist.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          402 drivers/gpu/drm/i915/gvt/execlist.c 	int ring_id = workload->ring_id;
workload          410 drivers/gpu/drm/i915/gvt/execlist.c 	gvt_dbg_el("complete workload %p status %d\n", workload,
workload          411 drivers/gpu/drm/i915/gvt/execlist.c 			workload->status);
workload          413 drivers/gpu/drm/i915/gvt/execlist.c 	if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
workload          421 drivers/gpu/drm/i915/gvt/execlist.c 		this_desc = &workload->ctx_desc;
workload          432 drivers/gpu/drm/i915/gvt/execlist.c 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
workload          434 drivers/gpu/drm/i915/gvt/execlist.c 	intel_vgpu_unpin_mm(workload->shadow_mm);
workload          435 drivers/gpu/drm/i915/gvt/execlist.c 	intel_vgpu_destroy_workload(workload);
workload          444 drivers/gpu/drm/i915/gvt/execlist.c 	struct intel_vgpu_workload *workload = NULL;
workload          446 drivers/gpu/drm/i915/gvt/execlist.c 	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
workload          447 drivers/gpu/drm/i915/gvt/execlist.c 	if (IS_ERR(workload))
workload          448 drivers/gpu/drm/i915/gvt/execlist.c 		return PTR_ERR(workload);
workload          450 drivers/gpu/drm/i915/gvt/execlist.c 	workload->prepare = prepare_execlist_workload;
workload          451 drivers/gpu/drm/i915/gvt/execlist.c 	workload->complete = complete_execlist_workload;
workload          452 drivers/gpu/drm/i915/gvt/execlist.c 	workload->emulate_schedule_in = emulate_schedule_in;
workload          455 drivers/gpu/drm/i915/gvt/execlist.c 		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
workload          457 drivers/gpu/drm/i915/gvt/execlist.c 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
workload          460 drivers/gpu/drm/i915/gvt/execlist.c 	intel_vgpu_queue_workload(workload);
workload          552 drivers/gpu/drm/i915/gvt/gvt.h int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
workload           58 drivers/gpu/drm/i915/gvt/scheduler.c static void update_shadow_pdps(struct intel_vgpu_workload *workload)
workload           61 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->req->hw_context->state->obj;
workload           65 drivers/gpu/drm/i915/gvt/scheduler.c 	if (WARN_ON(!workload->shadow_mm))
workload           68 drivers/gpu/drm/i915/gvt/scheduler.c 	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
workload           74 drivers/gpu/drm/i915/gvt/scheduler.c 			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
workload           83 drivers/gpu/drm/i915/gvt/scheduler.c static void sr_oa_regs(struct intel_vgpu_workload *workload,
workload           86 drivers/gpu/drm/i915/gvt/scheduler.c 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
workload          100 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->ring_id != RCS0)
workload          104 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
workload          106 drivers/gpu/drm/i915/gvt/scheduler.c 		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
workload          109 drivers/gpu/drm/i915/gvt/scheduler.c 			workload->flex_mmio[i] = reg_state[state_offset + 1];
workload          114 drivers/gpu/drm/i915/gvt/scheduler.c 		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
workload          116 drivers/gpu/drm/i915/gvt/scheduler.c 		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
workload          121 drivers/gpu/drm/i915/gvt/scheduler.c 			reg_state[state_offset + 1] = workload->flex_mmio[i];
workload          126 drivers/gpu/drm/i915/gvt/scheduler.c static int populate_shadow_context(struct intel_vgpu_workload *workload)
workload          128 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          130 drivers/gpu/drm/i915/gvt/scheduler.c 	int ring_id = workload->ring_id;
workload          132 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->req->hw_context->state->obj;
workload          142 drivers/gpu/drm/i915/gvt/scheduler.c 	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
workload          144 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
workload          147 drivers/gpu/drm/i915/gvt/scheduler.c 		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
workload          165 drivers/gpu/drm/i915/gvt/scheduler.c 			workload->ring_context_gpa +
workload          171 drivers/gpu/drm/i915/gvt/scheduler.c 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
workload          178 drivers/gpu/drm/i915/gvt/scheduler.c 			workload->ctx_desc.lrca);
workload          190 drivers/gpu/drm/i915/gvt/scheduler.c 				(u32)((workload->ctx_desc.lrca + i) <<
workload          234 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload;
workload          251 drivers/gpu/drm/i915/gvt/scheduler.c 	workload = scheduler->current_workload[ring_id];
workload          252 drivers/gpu/drm/i915/gvt/scheduler.c 	if (unlikely(!workload))
workload          258 drivers/gpu/drm/i915/gvt/scheduler.c 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
workload          261 drivers/gpu/drm/i915/gvt/scheduler.c 					      workload->vgpu, ring_id);
workload          262 drivers/gpu/drm/i915/gvt/scheduler.c 			scheduler->engine_owner[ring_id] = workload->vgpu;
workload          265 drivers/gpu/drm/i915/gvt/scheduler.c 				      ring_id, workload->vgpu->id);
workload          267 drivers/gpu/drm/i915/gvt/scheduler.c 		atomic_set(&workload->shadow_ctx_active, 1);
workload          270 drivers/gpu/drm/i915/gvt/scheduler.c 		save_ring_hw_state(workload->vgpu, ring_id);
workload          271 drivers/gpu/drm/i915/gvt/scheduler.c 		atomic_set(&workload->shadow_ctx_active, 0);
workload          274 drivers/gpu/drm/i915/gvt/scheduler.c 		save_ring_hw_state(workload->vgpu, ring_id);
workload          280 drivers/gpu/drm/i915/gvt/scheduler.c 	wake_up(&workload->shadow_ctx_status_wq);
workload          286 drivers/gpu/drm/i915/gvt/scheduler.c 				 struct intel_vgpu_workload *workload)
workload          295 drivers/gpu/drm/i915/gvt/scheduler.c 	desc |= workload->ctx_desc.addressing_mode <<
workload          301 drivers/gpu/drm/i915/gvt/scheduler.c static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
workload          303 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          304 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *req = workload->req;
workload          331 drivers/gpu/drm/i915/gvt/scheduler.c 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
workload          334 drivers/gpu/drm/i915/gvt/scheduler.c 			workload->rb_len);
workload          338 drivers/gpu/drm/i915/gvt/scheduler.c 	shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
workload          341 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->shadow_ring_buffer_va = cs;
workload          344 drivers/gpu/drm/i915/gvt/scheduler.c 			workload->rb_len);
workload          346 drivers/gpu/drm/i915/gvt/scheduler.c 	cs += workload->rb_len / sizeof(u32);
workload          347 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_ring_advance(workload->req, cs);
workload          364 drivers/gpu/drm/i915/gvt/scheduler.c static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
workload          367 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_mm *mm = workload->shadow_mm;
workload          388 drivers/gpu/drm/i915/gvt/scheduler.c intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
workload          390 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          397 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->req)
workload          400 drivers/gpu/drm/i915/gvt/scheduler.c 	rq = i915_request_create(s->shadow[workload->ring_id]);
workload          406 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->req = i915_request_get(rq);
workload          418 drivers/gpu/drm/i915/gvt/scheduler.c int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
workload          420 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          427 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->shadow)
workload          430 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
workload          431 drivers/gpu/drm/i915/gvt/scheduler.c 		shadow_context_descriptor_update(s->shadow[workload->ring_id],
workload          432 drivers/gpu/drm/i915/gvt/scheduler.c 						 workload);
workload          434 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
workload          438 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
workload          439 drivers/gpu/drm/i915/gvt/scheduler.c 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
workload          444 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->shadow = true;
workload          447 drivers/gpu/drm/i915/gvt/scheduler.c 	release_shadow_wa_ctx(&workload->wa_ctx);
workload          451 drivers/gpu/drm/i915/gvt/scheduler.c static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
workload          453 drivers/gpu/drm/i915/gvt/scheduler.c static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
workload          455 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_gvt *gvt = workload->vgpu->gvt;
workload          460 drivers/gpu/drm/i915/gvt/scheduler.c 	list_for_each_entry(bb, &workload->shadow_bb, list) {
workload          469 drivers/gpu/drm/i915/gvt/scheduler.c 			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
workload          515 drivers/gpu/drm/i915/gvt/scheduler.c 						      workload->req,
workload          526 drivers/gpu/drm/i915/gvt/scheduler.c 	release_shadow_batch_buffer(workload);
workload          532 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload =
workload          534 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
workload          575 drivers/gpu/drm/i915/gvt/scheduler.c static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
workload          577 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          581 drivers/gpu/drm/i915/gvt/scheduler.c 	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
workload          582 drivers/gpu/drm/i915/gvt/scheduler.c 	vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
workload          585 drivers/gpu/drm/i915/gvt/scheduler.c static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
workload          587 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          591 drivers/gpu/drm/i915/gvt/scheduler.c 	if (list_empty(&workload->shadow_bb))
workload          594 drivers/gpu/drm/i915/gvt/scheduler.c 	bb = list_first_entry(&workload->shadow_bb,
workload          599 drivers/gpu/drm/i915/gvt/scheduler.c 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
workload          620 drivers/gpu/drm/i915/gvt/scheduler.c static int prepare_workload(struct intel_vgpu_workload *workload)
workload          622 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          624 drivers/gpu/drm/i915/gvt/scheduler.c 	int ring = workload->ring_id;
workload          627 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = intel_vgpu_pin_mm(workload->shadow_mm);
workload          633 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
workload          634 drivers/gpu/drm/i915/gvt/scheduler.c 	    !workload->shadow_mm->ppgtt_mm.shadowed) {
workload          639 drivers/gpu/drm/i915/gvt/scheduler.c 	update_shadow_pdps(workload);
workload          641 drivers/gpu/drm/i915/gvt/scheduler.c 	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
workload          643 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
workload          649 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
workload          655 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = copy_workload_to_ring_buffer(workload);
workload          661 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = prepare_shadow_batch_buffer(workload);
workload          667 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
workload          673 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->prepare) {
workload          674 drivers/gpu/drm/i915/gvt/scheduler.c 		ret = workload->prepare(workload);
workload          681 drivers/gpu/drm/i915/gvt/scheduler.c 	release_shadow_wa_ctx(&workload->wa_ctx);
workload          683 drivers/gpu/drm/i915/gvt/scheduler.c 	release_shadow_batch_buffer(workload);
workload          685 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_vgpu_unpin_mm(workload->shadow_mm);
workload          689 drivers/gpu/drm/i915/gvt/scheduler.c static int dispatch_workload(struct intel_vgpu_workload *workload)
workload          691 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          694 drivers/gpu/drm/i915/gvt/scheduler.c 	int ring_id = workload->ring_id;
workload          698 drivers/gpu/drm/i915/gvt/scheduler.c 		ring_id, workload);
workload          703 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = intel_gvt_workload_req_alloc(workload);
workload          707 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = intel_gvt_scan_and_shadow_workload(workload);
workload          711 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = populate_shadow_context(workload);
workload          713 drivers/gpu/drm/i915/gvt/scheduler.c 		release_shadow_wa_ctx(&workload->wa_ctx);
workload          717 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = prepare_workload(workload);
workload          723 drivers/gpu/drm/i915/gvt/scheduler.c 		rq = fetch_and_zero(&workload->req);
workload          727 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!IS_ERR_OR_NULL(workload->req)) {
workload          729 drivers/gpu/drm/i915/gvt/scheduler.c 				ring_id, workload->req);
workload          730 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_add(workload->req);
workload          731 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->dispatched = true;
workload          735 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->status = ret;
workload          745 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload = NULL;
workload          772 drivers/gpu/drm/i915/gvt/scheduler.c 		workload = scheduler->current_workload[ring_id];
workload          774 drivers/gpu/drm/i915/gvt/scheduler.c 				ring_id, workload);
workload          788 drivers/gpu/drm/i915/gvt/scheduler.c 	workload = scheduler->current_workload[ring_id];
workload          790 drivers/gpu/drm/i915/gvt/scheduler.c 	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
workload          792 drivers/gpu/drm/i915/gvt/scheduler.c 	atomic_inc(&workload->vgpu->submission.running_workload_num);
workload          795 drivers/gpu/drm/i915/gvt/scheduler.c 	return workload;
workload          798 drivers/gpu/drm/i915/gvt/scheduler.c static void update_guest_context(struct intel_vgpu_workload *workload)
workload          800 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
workload          801 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          815 drivers/gpu/drm/i915/gvt/scheduler.c 		      workload->ctx_desc.lrca);
workload          817 drivers/gpu/drm/i915/gvt/scheduler.c 	head = workload->rb_head;
workload          818 drivers/gpu/drm/i915/gvt/scheduler.c 	tail = workload->rb_tail;
workload          819 drivers/gpu/drm/i915/gvt/scheduler.c 	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
workload          830 drivers/gpu/drm/i915/gvt/scheduler.c 	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
workload          844 drivers/gpu/drm/i915/gvt/scheduler.c 				(u32)((workload->ctx_desc.lrca + i) <<
workload          859 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
workload          860 drivers/gpu/drm/i915/gvt/scheduler.c 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
workload          866 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
workload          875 drivers/gpu/drm/i915/gvt/scheduler.c 			workload->ring_context_gpa +
workload          907 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload =
workload          909 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload          911 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
workload          922 drivers/gpu/drm/i915/gvt/scheduler.c 		wait_event(workload->shadow_ctx_status_wq,
workload          923 drivers/gpu/drm/i915/gvt/scheduler.c 			   !atomic_read(&workload->shadow_ctx_active));
workload          930 drivers/gpu/drm/i915/gvt/scheduler.c 		if (likely(workload->status == -EINPROGRESS)) {
workload          931 drivers/gpu/drm/i915/gvt/scheduler.c 			if (workload->req->fence.error == -EIO)
workload          932 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->status = -EIO;
workload          934 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->status = 0;
workload          937 drivers/gpu/drm/i915/gvt/scheduler.c 		if (!workload->status &&
workload          939 drivers/gpu/drm/i915/gvt/scheduler.c 			update_guest_context(workload);
workload          941 drivers/gpu/drm/i915/gvt/scheduler.c 			for_each_set_bit(event, workload->pending_events,
workload          946 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_put(fetch_and_zero(&workload->req));
workload          950 drivers/gpu/drm/i915/gvt/scheduler.c 			ring_id, workload, workload->status);
workload          954 drivers/gpu/drm/i915/gvt/scheduler.c 	list_del_init(&workload->list);
workload          956 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
workload          973 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->complete(workload);
workload          996 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload = NULL;
workload         1010 drivers/gpu/drm/i915/gvt/scheduler.c 			workload = pick_next_workload(gvt, ring_id);
workload         1011 drivers/gpu/drm/i915/gvt/scheduler.c 			if (workload)
workload         1018 drivers/gpu/drm/i915/gvt/scheduler.c 		if (!workload)
workload         1022 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->ring_id, workload,
workload         1023 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->vgpu->id);
workload         1028 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->ring_id, workload);
workload         1039 drivers/gpu/drm/i915/gvt/scheduler.c 		update_vreg_in_ctx(workload);
workload         1041 drivers/gpu/drm/i915/gvt/scheduler.c 		ret = dispatch_workload(workload);
workload         1044 drivers/gpu/drm/i915/gvt/scheduler.c 			vgpu = workload->vgpu;
workload         1050 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->ring_id, workload);
workload         1051 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
workload         1055 drivers/gpu/drm/i915/gvt/scheduler.c 				workload, workload->status);
workload         1378 drivers/gpu/drm/i915/gvt/scheduler.c void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
workload         1380 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
workload         1382 drivers/gpu/drm/i915/gvt/scheduler.c 	release_shadow_batch_buffer(workload);
workload         1383 drivers/gpu/drm/i915/gvt/scheduler.c 	release_shadow_wa_ctx(&workload->wa_ctx);
workload         1385 drivers/gpu/drm/i915/gvt/scheduler.c 	if (workload->shadow_mm)
workload         1386 drivers/gpu/drm/i915/gvt/scheduler.c 		intel_vgpu_mm_put(workload->shadow_mm);
workload         1388 drivers/gpu/drm/i915/gvt/scheduler.c 	kmem_cache_free(s->workloads, workload);
workload         1395 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload;
workload         1397 drivers/gpu/drm/i915/gvt/scheduler.c 	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
workload         1398 drivers/gpu/drm/i915/gvt/scheduler.c 	if (!workload)
workload         1401 drivers/gpu/drm/i915/gvt/scheduler.c 	INIT_LIST_HEAD(&workload->list);
workload         1402 drivers/gpu/drm/i915/gvt/scheduler.c 	INIT_LIST_HEAD(&workload->shadow_bb);
workload         1404 drivers/gpu/drm/i915/gvt/scheduler.c 	init_waitqueue_head(&workload->shadow_ctx_status_wq);
workload         1405 drivers/gpu/drm/i915/gvt/scheduler.c 	atomic_set(&workload->shadow_ctx_active, 0);
workload         1407 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->status = -EINPROGRESS;
workload         1408 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->vgpu = vgpu;
workload         1410 drivers/gpu/drm/i915/gvt/scheduler.c 	return workload;
workload         1429 drivers/gpu/drm/i915/gvt/scheduler.c static int prepare_mm(struct intel_vgpu_workload *workload)
workload         1431 drivers/gpu/drm/i915/gvt/scheduler.c 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
workload         1433 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu *vgpu = workload->vgpu;
workload         1449 drivers/gpu/drm/i915/gvt/scheduler.c 	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
workload         1451 drivers/gpu/drm/i915/gvt/scheduler.c 	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
workload         1455 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->shadow_mm = mm;
workload         1482 drivers/gpu/drm/i915/gvt/scheduler.c 	struct intel_vgpu_workload *workload = NULL;
workload         1539 drivers/gpu/drm/i915/gvt/scheduler.c 	workload = alloc_workload(vgpu);
workload         1540 drivers/gpu/drm/i915/gvt/scheduler.c 	if (IS_ERR(workload))
workload         1541 drivers/gpu/drm/i915/gvt/scheduler.c 		return workload;
workload         1543 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->ring_id = ring_id;
workload         1544 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->ctx_desc = *desc;
workload         1545 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->ring_context_gpa = ring_context_gpa;
workload         1546 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->rb_head = head;
workload         1547 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->guest_rb_head = guest_head;
workload         1548 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->rb_tail = tail;
workload         1549 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->rb_start = start;
workload         1550 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->rb_ctl = ctl;
workload         1558 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->wa_ctx.indirect_ctx.guest_gma =
workload         1560 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->wa_ctx.indirect_ctx.size =
workload         1564 drivers/gpu/drm/i915/gvt/scheduler.c 		if (workload->wa_ctx.indirect_ctx.size != 0) {
workload         1566 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->wa_ctx.indirect_ctx.guest_gma,
workload         1567 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->wa_ctx.indirect_ctx.size)) {
workload         1569 drivers/gpu/drm/i915/gvt/scheduler.c 				    workload->wa_ctx.indirect_ctx.guest_gma);
workload         1570 drivers/gpu/drm/i915/gvt/scheduler.c 				kmem_cache_free(s->workloads, workload);
workload         1575 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->wa_ctx.per_ctx.guest_gma =
workload         1577 drivers/gpu/drm/i915/gvt/scheduler.c 		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
workload         1578 drivers/gpu/drm/i915/gvt/scheduler.c 		if (workload->wa_ctx.per_ctx.valid) {
workload         1580 drivers/gpu/drm/i915/gvt/scheduler.c 				workload->wa_ctx.per_ctx.guest_gma,
workload         1583 drivers/gpu/drm/i915/gvt/scheduler.c 					workload->wa_ctx.per_ctx.guest_gma);
workload         1584 drivers/gpu/drm/i915/gvt/scheduler.c 				kmem_cache_free(s->workloads, workload);
workload         1591 drivers/gpu/drm/i915/gvt/scheduler.c 			workload, ring_id, head, tail, start, ctl);
workload         1593 drivers/gpu/drm/i915/gvt/scheduler.c 	ret = prepare_mm(workload);
workload         1595 drivers/gpu/drm/i915/gvt/scheduler.c 		kmem_cache_free(s->workloads, workload);
workload         1605 drivers/gpu/drm/i915/gvt/scheduler.c 		ret = intel_gvt_scan_and_shadow_workload(workload);
workload         1613 drivers/gpu/drm/i915/gvt/scheduler.c 		intel_vgpu_destroy_workload(workload);
workload         1617 drivers/gpu/drm/i915/gvt/scheduler.c 	return workload;
workload         1624 drivers/gpu/drm/i915/gvt/scheduler.c void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
workload         1626 drivers/gpu/drm/i915/gvt/scheduler.c 	list_add_tail(&workload->list,
workload         1627 drivers/gpu/drm/i915/gvt/scheduler.c 		workload_q_head(workload->vgpu, workload->ring_id));
workload         1628 drivers/gpu/drm/i915/gvt/scheduler.c 	intel_gvt_kick_schedule(workload->vgpu->gvt);
workload         1629 drivers/gpu/drm/i915/gvt/scheduler.c 	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
workload          135 drivers/gpu/drm/i915/gvt/scheduler.h void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
workload          161 drivers/gpu/drm/i915/gvt/scheduler.h void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
workload          231 drivers/gpu/drm/i915/gvt/trace.h 		void *workload, const char *cmd_name),
workload          234 drivers/gpu/drm/i915/gvt/trace.h 		buf_addr_type, workload, cmd_name),
workload          243 drivers/gpu/drm/i915/gvt/trace.h 		__field(void*, workload)
workload          255 drivers/gpu/drm/i915/gvt/trace.h 		__entry->workload = workload;
workload          271 drivers/gpu/drm/i915/gvt/trace.h 		__entry->workload)
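Taken together, the execlist.c and scheduler.c hits above trace one vGPU workload through its lifecycle: intel_vgpu_create_workload() allocates and fills it, the execlist code attaches prepare/complete callbacks and calls intel_vgpu_queue_workload(), the scheduler thread picks it with pick_next_workload(), dispatch_workload() scans/shadows and prepares it, and complete_current_workload() finally invokes workload->complete(), which destroys it. The following is a heavily simplified, self-contained model of that flow only; the toy_* types and helpers are illustrative stand-ins, not the GVT API.

    #include <stdio.h>
    #include <stdlib.h>

    /* illustrative stand-in for struct intel_vgpu_workload: just the lifecycle hooks */
    struct toy_workload {
        int ring_id;
        int status;
        int (*prepare)(struct toy_workload *w);
        void (*complete)(struct toy_workload *w);
        struct toy_workload *next;    /* simple FIFO link */
    };

    static struct toy_workload *queue_head;

    static void queue_workload(struct toy_workload *w)    /* ~intel_vgpu_queue_workload() */
    {
        struct toy_workload **p = &queue_head;

        while (*p)
            p = &(*p)->next;
        *p = w;
    }

    static struct toy_workload *pick_next_workload(void)  /* ~pick_next_workload() */
    {
        struct toy_workload *w = queue_head;

        if (w)
            queue_head = w->next;
        return w;
    }

    static int toy_prepare(struct toy_workload *w)
    {
        printf("prepare workload on ring %d\n", w->ring_id);
        return 0;
    }

    static void toy_complete(struct toy_workload *w)
    {
        printf("complete workload on ring %d, status %d\n", w->ring_id, w->status);
        free(w);    /* ~intel_vgpu_destroy_workload() */
    }

    int main(void)
    {
        struct toy_workload *w = calloc(1, sizeof(*w));   /* ~intel_vgpu_create_workload() */

        if (!w)
            return 1;
        w->ring_id = 0;
        w->prepare = toy_prepare;
        w->complete = toy_complete;
        queue_workload(w);

        /* ~workload_thread(): pick, dispatch (prepare), then hand back via complete */
        w = pick_next_workload();
        if (w) {
            w->status = w->prepare(w);    /* ~dispatch_workload() */
            w->complete(w);               /* ~complete_current_workload() */
        }
        return 0;
    }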
workload         1094 tools/perf/builtin-record.c 	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
workload         1509 tools/perf/builtin-record.c 						   rec->evlist->workload.pid,
workload         1529 tools/perf/builtin-record.c 						  rec->evlist->workload.pid,
workload         1676 tools/perf/builtin-record.c 			kill(rec->evlist->workload.pid, SIGTERM);
workload          454 tools/perf/builtin-stat.c 		child_pid = evsel_list->workload.pid;
workload           55 tools/perf/tests/event-times.c 	waitpid(evlist->workload.pid, NULL, 0);
workload          113 tools/perf/tests/perf-record.c 	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
workload          125 tools/perf/tests/perf-record.c 	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
workload          211 tools/perf/tests/perf-record.c 				if ((pid_t)sample.pid != evlist->workload.pid) {
workload          213 tools/perf/tests/perf-record.c 						 name, evlist->workload.pid, sample.pid);
workload          217 tools/perf/tests/perf-record.c 				if ((pid_t)sample.tid != evlist->workload.pid) {
workload          219 tools/perf/tests/perf-record.c 						 name, evlist->workload.pid, sample.tid);
workload          228 tools/perf/tests/perf-record.c 				     (pid_t)event->comm.pid != evlist->workload.pid) {
workload           61 tools/perf/util/evlist.c 	evlist->workload.pid = -1;
workload         1287 tools/perf/util/evlist.c 	evlist->workload.pid = fork();
workload         1288 tools/perf/util/evlist.c 	if (evlist->workload.pid < 0) {
workload         1293 tools/perf/util/evlist.c 	if (!evlist->workload.pid) {
workload         1357 tools/perf/util/evlist.c 		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
workload         1371 tools/perf/util/evlist.c 	evlist->workload.cork_fd = go_pipe[1];
workload         1386 tools/perf/util/evlist.c 	if (evlist->workload.cork_fd > 0) {
workload         1392 tools/perf/util/evlist.c 		ret = write(evlist->workload.cork_fd, &bf, 1);
workload         1396 tools/perf/util/evlist.c 		close(evlist->workload.cork_fd);
workload           62 tools/perf/util/evlist.h 	} workload;
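The perf hits center on evlist->workload.pid and workload.cork_fd: evlist.c forks the workload child, keeps the write end of a "go" pipe as cork_fd, and starting the workload later amounts to writing a single byte and closing that fd, which unblocks the child's read so it can exec the traced command only after the events have been set up. A standalone sketch of that fork-then-uncork pattern, using plain POSIX calls; the command run ("true") is just an example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        int go_pipe[2];
        int cork_fd;
        pid_t pid;
        char bf = 0;

        if (pipe(go_pipe))
            return 1;

        pid = fork();
        if (pid < 0)
            return 1;

        if (pid == 0) {
            /* child: block until the parent sends the "go" byte, then run the workload */
            close(go_pipe[1]);
            if (read(go_pipe[0], &bf, 1) != 1)
                _exit(1);
            execlp("true", "true", (char *)NULL);    /* example command */
            _exit(127);
        }

        /* parent: keep the write end as the cork; counters would be opened here */
        close(go_pipe[0]);
        cork_fd = go_pipe[1];

        /* start the workload: one byte plus close, mirroring the evlist code */
        if (write(cork_fd, &bf, 1) != 1)
            perror("write");
        close(cork_fd);

        waitpid(pid, NULL, 0);
        return 0;
    }

Keeping the child corked on the pipe is what lets builtin-record.c and builtin-stat.c open and enable their counters against workload.pid before the command actually starts executing.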