/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>

DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
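
/*
 * The ww class, seqcount class and seqcount name above are shared by all
 * reservation objects. A minimal init sketch, assuming reservation_object_init()
 * in <linux/reservation.h> wires them up through the lockdep-aware
 * __seqcount_init() helper roughly as:
 *
 *	ww_mutex_init(&obj->lock, &reservation_ww_class);
 *	__seqcount_init(&obj->seq, reservation_seqcount_string,
 *			&reservation_seqcount_class);
 */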

/*
 * Reserve space to add a shared fence to a reservation_object;
 * must be called with obj->lock held.
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * resize obj->staged or allocate if it doesn't exist,
	 * noop if already correct size
	 */
	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
			GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);
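
/*
 * Usage sketch, not lifted from a real driver: reserve the slot while
 * errors can still be handled, then add the fence later on the path
 * that must not fail. The staged list starts at 4 entries and doubles
 * on each growth; -ENOMEM is the only possible error.
 *
 *	ww_mutex_lock(&obj->lock, NULL);
 *	ret = reservation_object_reserve_shared(obj);
 *	if (ret) {
 *		ww_mutex_unlock(&obj->lock);
 *		return ret;
 *	}
 */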

static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/*
 * Add a fence to a shared slot, obj->lock must be held, and
 * reservation_object_reserve_shared() must have been called.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
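
/*
 * A hedged sketch of the reader-side pattern the two calls combine
 * into ("submit_read_job" is a made-up driver hook, not a real API):
 *
 *	ww_mutex_lock(&obj->lock, NULL);
 *	ret = reservation_object_reserve_shared(obj);
 *	if (!ret) {
 *		fence = submit_read_job(obj);
 *		reservation_object_add_shared_fence(obj, fence);
 *	}
 *	ww_mutex_unlock(&obj->lock);
 *
 * Only one fence per fence context is kept: a fence whose ->context
 * matches an existing slot replaces that slot instead of consuming
 * the reserved entry.
 */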

/*
 * Add an exclusive fence, dropping all shared fences; fence may be
 * NULL to clear the exclusive slot. obj->lock must be held.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
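
/*
 * Hedged writer-side sketch ("submit_write_job" is made up for
 * illustration). Since the shared fences are dropped, the caller is
 * expected to make sure the new exclusive fence cannot signal before
 * the fences it replaces:
 *
 *	ww_mutex_lock(&obj->lock, NULL);
 *	fence = submit_write_job(obj);
 *	reservation_object_add_excl_fence(obj, fence);
 *	ww_mutex_unlock(&obj->lock);
 */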

/*
 * Get a snapshot of the exclusive fence and all shared fences without
 * taking obj->lock. On success the caller holds a reference on every
 * returned fence and must kfree() *pshared.
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
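
/*
 * Sketch of consuming the snapshot (illustrative only):
 *
 *	struct fence *excl, **shared;
 *	unsigned count, i;
 *	int ret;
 *
 *	ret = reservation_object_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < count; ++i)
 *		fence_put(shared[i]);
 *	kfree(shared);
 *	if (excl)
 *		fence_put(excl);
 */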

/*
 * Wait for the fences on obj without holding obj->lock: all of them if
 * wait_all is set, otherwise only the exclusive fence. Returns the
 * semantics of fence_wait_timeout(): a negative error, 0 on timeout,
 * or the remaining jiffies. A zero timeout turns this into a
 * non-blocking signaled test.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
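
/*
 * Sketch (illustrative): interruptibly wait for all access to finish,
 * with a 10 second cap:
 *
 *	long lret;
 *
 *	lret = reservation_object_wait_timeout_rcu(obj, true, true,
 *						   msecs_to_jiffies(10000));
 *	if (lret < 0)
 *		return lret;
 *	if (!lret)
 *		return -ETIMEDOUT;
 */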

static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

/*
 * Test if the fences on obj are signaled without taking obj->lock;
 * checks all shared fences if test_all is set, otherwise only the
 * exclusive fence.
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
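
/*
 * Sketch of a non-blocking busy check (illustrative):
 *
 *	if (!reservation_object_test_signaled_rcu(obj, true))
 *		return -EBUSY;
 *
 * With test_all == false only the exclusive fence is checked, i.e.
 * whether the object is idle enough for a new shared (read) user.
 */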