/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__

/* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0)
/* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1)
/* bit set means a context switch occurred */
#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2)
/* bit set means the IB is preempted */
#define AMDGPU_IB_PREEMPTED                 (1 << 3)
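/*
 * Usage sketch (illustrative only): these bits travel with the job in
 * the preamble_status/preemption_status words and are ORed into the
 * context-control state when the job's IBs are emitted, roughly:
 *
 *	uint32_t status = 0;
 *
 *	if (job) {
 *		status |= job->preamble_status;
 *		status |= job->preemption_status;
 *	}
 *	amdgpu_ring_emit_cntxcntl(ring, status);
 *
 * amdgpu_ring_emit_cntxcntl() dispatches to the ring backend; treat the
 * exact call site as an assumption, the real flag handling lives in the
 * IB scheduling code.
 */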

#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
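/*
 * Usage sketch (illustrative only): a DRM scheduler callback receives a
 * struct drm_sched_job and recovers the enclosing amdgpu_job with
 * container_of(); the VMID accessor tolerates a NULL job:
 *
 *	static struct dma_fence *example_run_job(struct drm_sched_job *s_job)
 *	{
 *		struct amdgpu_job *job = to_amdgpu_job(s_job);
 *		unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 *		...
 *	}
 *
 * "example_run_job" is a hypothetical name; the driver wires its real
 * callback up through drm_sched_backend_ops.
 */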

struct amdgpu_fence;

struct amdgpu_job {
	struct drm_sched_job	base;
	struct amdgpu_vm	*vm;
	struct amdgpu_sync	sync;
	struct amdgpu_sync	sched_sync;
	struct amdgpu_ib	*ibs;
	struct dma_fence	*fence; /* the hw fence */
	uint32_t		preamble_status;
	uint32_t		preemption_status;
	uint32_t		num_ibs;
	void			*owner;
	bool			vm_needs_flush;
	uint64_t		vm_pd_addr;
	unsigned		vmid;
	unsigned		pasid;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
	uint32_t		vram_lost_counter;

	/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;

};

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence);
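/*
 * Usage sketch (illustrative only): a typical kernel-internal
 * submission allocates a single-IB job, fills the IB, and hands the job
 * to a scheduler entity. "adev", "entity" and "owner" stand in for
 * caller-provided state:
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 *	if (r)
 *		return r;
 *
 *	(fill job->ibs[0] with packets here)
 *
 *	r = amdgpu_job_submit(job, entity, owner, &f);
 *	if (r) {
 *		amdgpu_job_free(job);
 *		return r;
 *	}
 *	dma_fence_put(f);
 *
 * On success the scheduler owns the job and releases it after
 * execution, so amdgpu_job_free() is only called on the error path;
 * amdgpu_job_submit_direct() bypasses the scheduler and emits straight
 * to the given ring.
 */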
#endif