
Searched refs:dqm (Results 1 – 11 of 11) sorted by relevance

/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
40 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
47 static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
48 static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
50 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
54 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
65 unsigned int get_first_pipe(struct device_queue_manager *dqm) in get_first_pipe() argument
67 BUG_ON(!dqm || !dqm->dev); in get_first_pipe()
68 return dqm->dev->shared_resources.first_compute_pipe; in get_first_pipe()
71 unsigned int get_pipes_num(struct device_queue_manager *dqm) in get_pipes_num() argument
[all …]

kfd_device_queue_manager.h
87 int (*create_queue)(struct device_queue_manager *dqm,
91 int (*destroy_queue)(struct device_queue_manager *dqm,
94 int (*update_queue)(struct device_queue_manager *dqm,
98 (struct device_queue_manager *dqm,
101 int (*register_process)(struct device_queue_manager *dqm,
103 int (*unregister_process)(struct device_queue_manager *dqm,
105 int (*initialize)(struct device_queue_manager *dqm);
106 int (*start)(struct device_queue_manager *dqm);
107 int (*stop)(struct device_queue_manager *dqm);
108 void (*uninitialize)(struct device_queue_manager *dqm);
[all …]
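
The kfd_device_queue_manager.h hits above declare the queue-manager operations as a table of function pointers (create_queue, destroy_queue, register_process, start, stop, and so on). The standalone sketch below only illustrates that ops-table dispatch pattern; the types and names are invented for the example and are not the kernel's definitions.

/* Simplified user-space sketch of the function-pointer "ops" dispatch
 * suggested by the header hits above; every name here is illustrative. */
#include <stdio.h>

struct toy_queue { int id; };
struct toy_dqm;                                 /* forward declaration */

struct toy_dqm_ops {
        int  (*create_queue)(struct toy_dqm *dqm, struct toy_queue *q);
        int  (*start)(struct toy_dqm *dqm);
        void (*uninitialize)(struct toy_dqm *dqm);
};

struct toy_dqm {
        const struct toy_dqm_ops *ops;          /* dispatch table */
        int queue_count;
};

static int toy_create_queue(struct toy_dqm *dqm, struct toy_queue *q)
{
        q->id = dqm->queue_count++;
        printf("created queue %d\n", q->id);
        return 0;
}

static int toy_start(struct toy_dqm *dqm)
{
        printf("dqm started with %d queue(s)\n", dqm->queue_count);
        return 0;
}

static void toy_uninitialize(struct toy_dqm *dqm)
{
        dqm->queue_count = 0;
}

static const struct toy_dqm_ops toy_ops = {
        .create_queue = toy_create_queue,
        .start        = toy_start,
        .uninitialize = toy_uninitialize,
};

int main(void)
{
        struct toy_dqm dqm = { .ops = &toy_ops };
        struct toy_queue q;

        dqm.ops->create_queue(&dqm, &q);        /* callers go through ops */
        dqm.ops->start(&dqm);
        dqm.ops->uninitialize(&dqm);
        return 0;
}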

kfd_process_queue_manager.c
181 dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
195 ((dev->dqm->processes_count >= VMID_PER_DEVICE) || in pqm_create_queue()
196 (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) { in pqm_create_queue()
207 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, in pqm_create_queue()
221 retval = dev->dqm->ops.create_kernel_queue(dev->dqm, in pqm_create_queue()
252 dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
260 struct device_queue_manager *dqm; in pqm_destroy_queue() local
264 dqm = NULL; in pqm_destroy_queue()
292 dqm = pqn->kq->dev->dqm; in pqm_destroy_queue()
293 dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); in pqm_destroy_queue()
[all …]
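
The pqm_create_queue() hits above (source lines 195 and 196) show queue creation being refused once the device queue manager has hit its process or queue limits. Below is a minimal user-space sketch of that kind of capacity check; the constants and the struct are placeholders, not the kernel's values.

/* Toy capacity check modelled on the pqm_create_queue() condition above;
 * limits and field names are stand-ins for the kernel's. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_VMID_PER_DEVICE     8
#define TOY_PIPES_PER_ME        4
#define TOY_QUEUES_PER_PIPE     8

struct toy_dqm_state {
        unsigned int processes_count;
        unsigned int queue_count;
};

/* Returns true while another queue fits under the toy per-device limits. */
static bool toy_can_create_queue(const struct toy_dqm_state *dqm)
{
        if (dqm->processes_count >= TOY_VMID_PER_DEVICE)
                return false;
        if (dqm->queue_count >= TOY_PIPES_PER_ME * TOY_QUEUES_PER_PIPE)
                return false;
        return true;
}

int main(void)
{
        struct toy_dqm_state dqm = { .processes_count = 2, .queue_count = 31 };

        printf("can create another queue: %s\n",
               toy_can_create_queue(&dqm) ? "yes" : "no");
        return 0;
}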

kfd_device_queue_manager_cik.c
27 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
33 static int register_process_cik(struct device_queue_manager *dqm,
35 static int initialize_cpsch_cik(struct device_queue_manager *dqm);
71 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, in set_cache_memory_policy_cik() argument
97 static int register_process_cik(struct device_queue_manager *dqm, in register_process_cik() argument
103 BUG_ON(!dqm || !qpd); in register_process_cik()
132 static int initialize_cpsch_cik(struct device_queue_manager *dqm) in initialize_cpsch_cik() argument
134 return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); in initialize_cpsch_cik()

kfd_device_queue_manager_vi.c
26 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
32 static int register_process_vi(struct device_queue_manager *dqm,
34 static int initialize_cpsch_vi(struct device_queue_manager *dqm);
45 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, in set_cache_memory_policy_vi() argument
55 static int register_process_vi(struct device_queue_manager *dqm, in register_process_vi() argument
61 static int initialize_cpsch_vi(struct device_queue_manager *dqm) in initialize_cpsch_vi() argument
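
The *_cik.c and *_vi.c hits above show the same callbacks (set_cache_memory_policy, register_process, initialize_cpsch) implemented once per GPU generation. The sketch below illustrates that per-ASIC specialization with invented names; it is not the kernel's actual wiring code.

/* Toy per-ASIC ops selection; both the struct and the callbacks are invented. */
#include <stdio.h>

struct toy_asic_ops {
        int (*initialize)(void);
        int (*register_process)(int pasid);
};

static int toy_init_cik(void)          { puts("cik: init pipelines"); return 0; }
static int toy_register_cik(int pasid) { printf("cik: register %d\n", pasid); return 0; }
static int toy_init_vi(void)           { puts("vi: init pipelines"); return 0; }
static int toy_register_vi(int pasid)  { printf("vi: register %d\n", pasid); return 0; }

/* Fill the shared ops table with the generation-specific implementations. */
static void toy_ops_init(struct toy_asic_ops *ops, int is_vi)
{
        if (is_vi) {
                ops->initialize       = toy_init_vi;
                ops->register_process = toy_register_vi;
        } else {
                ops->initialize       = toy_init_cik;
                ops->register_process = toy_register_cik;
        }
}

int main(void)
{
        struct toy_asic_ops ops;

        toy_ops_init(&ops, 0);          /* pick the CIK variants */
        ops.initialize();
        ops.register_process(1);
        return 0;
}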

kfd_device.c
247 kfd->dqm = device_queue_manager_init(kfd); in kgd2kfd_device_init()
248 if (!kfd->dqm) { in kgd2kfd_device_init()
255 if (kfd->dqm->ops.start(kfd->dqm) != 0) { in kgd2kfd_device_init()
272 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_init()
291 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_exit()
306 kfd->dqm->ops.stop(kfd->dqm); in kgd2kfd_suspend()
327 kfd->dqm->ops.start(kfd->dqm); in kgd2kfd_resume()
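
The kfd_device.c hits above trace the queue manager's lifecycle: device_queue_manager_init() during device init, ops.start() right after, ops.stop() on suspend, ops.start() again on resume, and device_queue_manager_uninit() on teardown or when init fails. The standalone sketch below only mirrors that create/start/stop/uninit flow; every name in it is invented.

/* Toy lifecycle mirroring the init/start/stop/uninit sequence above. */
#include <stdio.h>
#include <stdlib.h>

struct toy_dqm { int running; };

static struct toy_dqm *toy_dqm_init(void)
{
        return calloc(1, sizeof(struct toy_dqm));
}

static int toy_dqm_start(struct toy_dqm *dqm)   { dqm->running = 1; return 0; }
static void toy_dqm_stop(struct toy_dqm *dqm)   { dqm->running = 0; }
static void toy_dqm_uninit(struct toy_dqm *dqm) { free(dqm); }

int main(void)
{
        struct toy_dqm *dqm = toy_dqm_init();   /* device init */

        if (!dqm)
                return 1;
        if (toy_dqm_start(dqm) != 0) {          /* a start failure unwinds init */
                toy_dqm_uninit(dqm);
                return 1;
        }
        toy_dqm_stop(dqm);                      /* suspend */
        toy_dqm_start(dqm);                     /* resume */
        toy_dqm_uninit(dqm);                    /* device exit */
        printf("lifecycle complete\n");
        return 0;
}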

kfd_packet_manager.c
61 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
62 queue_count = pm->dqm->queue_count; in pm_calc_rlib_size()
100 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
263 pm->dqm->processes_count, pm->dqm->queue_count); in pm_create_runlist_ib()
269 if (proccesses_mapped >= pm->dqm->processes_count) { in pm_create_runlist_ib()
317 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) in pm_init() argument
319 BUG_ON(!dqm); in pm_init()
321 pm->dqm = dqm; in pm_init()
323 pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ); in pm_init()
553 kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj); in pm_release_ib()
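
The kfd_packet_manager.c hits above show pm_init() storing the dqm pointer and pm_calc_rlib_size() reading processes_count and queue_count through it to size the runlist buffer. The sketch below only mirrors that sizing idea; the packet sizes and types are made up for the example.

/* Toy runlist sizing from the queue manager's current counters. */
#include <stdio.h>

struct toy_dqm { unsigned int processes_count, queue_count; };
struct toy_pm  { struct toy_dqm *dqm; };        /* back-pointer, as a pm_init-like setup would keep */

#define TOY_MAP_PROCESS_PACKET_BYTES    16
#define TOY_MAP_QUEUE_PACKET_BYTES      24

static unsigned int toy_calc_rlib_size(const struct toy_pm *pm)
{
        return pm->dqm->processes_count * TOY_MAP_PROCESS_PACKET_BYTES +
               pm->dqm->queue_count     * TOY_MAP_QUEUE_PACKET_BYTES;
}

int main(void)
{
        struct toy_dqm dqm = { .processes_count = 2, .queue_count = 5 };
        struct toy_pm  pm  = { .dqm = &dqm };

        printf("runlist buffer: %u bytes\n", toy_calc_rlib_size(&pm));
        return 0;
}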

kfd_priv.h
165 struct device_queue_manager *dqm; member
383 struct device_queue_manager *dqm; member
585 void device_queue_manager_uninit(struct device_queue_manager *dqm);
619 struct device_queue_manager *dqm; member
626 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);

kfd_kernel_queue.c
59 kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm, in initialize()

kfd_process.c
341 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()

kfd_chardev.c
419 if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, in kfd_ioctl_set_memory_policy()