/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */

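/*
 * Illustrative usage sketch (not taken verbatim from the driver; names such
 * as dev, shadow_batch_obj and batch_len stand in for whatever the caller has
 * at hand). i915_gem_batch_pool_get() and i915_gem_batch_pool_fini() must be
 * called with struct_mutex held:
 *
 *	struct i915_gem_batch_pool pool;
 *	struct drm_i915_gem_object *shadow_batch_obj;
 *
 *	i915_gem_batch_pool_init(dev, &pool);
 *	...
 *	shadow_batch_obj = i915_gem_batch_pool_get(&pool, batch_len);
 *	if (IS_ERR(shadow_batch_obj))
 *		return PTR_ERR(shadow_batch_obj);
 *	... copy the user batch into shadow_batch_obj and execute from it ...
 *	...
 *	i915_gem_batch_pool_fini(&pool);
 */
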
/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @dev: the drm device
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct drm_device *dev,
			      struct i915_gem_batch_pool *pool)
{
	pool->dev = dev;
	INIT_LIST_HEAD(&pool->cache_list);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	while (!list_empty(&pool->cache_list)) {
		struct drm_i915_gem_object *obj =
			list_first_entry(&pool->cache_list,
					 struct drm_i915_gem_object,
					 batch_pool_list);

		WARN_ON(obj->active);

		list_del_init(&obj->batch_pool_list);
		drm_gem_object_unreference(&obj->base);
	}
}

/**
 * i915_gem_batch_pool_get() - select a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Finds or allocates a batch buffer in the pool with at least the requested
 * size. The caller is responsible for any domain, active/inactive, or
 * purgeability management for the returned buffer.
 *
 * Note: Callers must hold the struct_mutex.
 *
 * Return: the selected batch buffer object, or an ERR_PTR if allocating a
 * new buffer fails
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	list_for_each_entry_safe(tmp, next,
				 &pool->cache_list, batch_pool_list) {
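		/* The GPU may still be executing from this buffer; skip it */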
		if (tmp->active)
			continue;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_list);
			drm_gem_object_unreference(&tmp->base);
			continue;
		}

		/*
		 * Select a buffer that is at least as big as needed
		 * but not 'too much' bigger. A better way to do this
		 * might be to bucket the pool objects based on size.
		 */
		if (tmp->base.size >= size &&
		    tmp->base.size <= (2 * size)) {
			obj = tmp;
			break;
		}
	}

	if (!obj) {
		obj = i915_gem_alloc_object(pool->dev, size);
		if (!obj)
			return ERR_PTR(-ENOMEM);

		list_add_tail(&obj->batch_pool_list, &pool->cache_list);
	} else {
		/* Keep list in LRU order */
		list_move_tail(&obj->batch_pool_list, &pool->cache_list);
	}

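	/* Mark the buffer as no longer purgeable now that it is being reused */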
	obj->madv = I915_MADV_WILLNEED;

	return obj;
}