/*
 * UWB DRP IE management.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uwb.h>

#include "uwb-internal.h"


/*
 * Return the reason code for a reservation's DRP IE.
 */
static int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
	static const int reason_codes[] = {
		[UWB_RSV_STATE_O_INITIATED]          = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_PENDING]            = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_MODIFIED]           = UWB_DRP_REASON_MODIFIED,
		[UWB_RSV_STATE_O_ESTABLISHED]        = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_TO_BE_MOVED]        = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_MOVE_COMBINING]     = UWB_DRP_REASON_MODIFIED,
		[UWB_RSV_STATE_O_MOVE_REDUCING]      = UWB_DRP_REASON_MODIFIED,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_ACCEPTED]           = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_CONFLICT]           = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_PENDING]            = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_DENIED]             = UWB_DRP_REASON_DENIED,
		[UWB_RSV_STATE_T_RESIZED]            = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
	};

	return reason_codes[rsv->state];
}

/*
 * Return the reason code for a reservation's companion DRP IE.
 */
static int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
	static const int companion_reason_codes[] = {
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
	};

	return companion_reason_codes[rsv->state];
}

/*
 * Return the status bit for a reservation's DRP IE.
 */
int uwb_rsv_status(struct uwb_rsv *rsv)
{
	static const int statuses[] = {
		[UWB_RSV_STATE_O_INITIATED]          = 0,
		[UWB_RSV_STATE_O_PENDING]            = 0,
		[UWB_RSV_STATE_O_MODIFIED]           = 1,
		[UWB_RSV_STATE_O_ESTABLISHED]        = 1,
		[UWB_RSV_STATE_O_TO_BE_MOVED]        = 0,
		[UWB_RSV_STATE_O_MOVE_COMBINING]     = 1,
		[UWB_RSV_STATE_O_MOVE_REDUCING]      = 1,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = 1,
		[UWB_RSV_STATE_T_ACCEPTED]           = 1,
		[UWB_RSV_STATE_T_CONFLICT]           = 0,
		[UWB_RSV_STATE_T_PENDING]            = 0,
		[UWB_RSV_STATE_T_DENIED]             = 0,
		[UWB_RSV_STATE_T_RESIZED]            = 1,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = 1,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = 1,
	};

	return statuses[rsv->state];
}

/*
 * Return the status bit for a reservation's companion DRP IE.
 */
int uwb_rsv_companion_status(struct uwb_rsv *rsv)
{
	static const int companion_statuses[] = {
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = 0,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = 0,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = 0,
	};

	return companion_statuses[rsv->state];
}

/*
 * Allocate a DRP IE.
 *
 * To save having to free/allocate a DRP IE when its MAS changes,
 * enough memory is allocated for the maximum number of DRP
 * allocation fields (one per zone).  This gives an overhead per
 * reservation of up to (UWB_NUM_ZONES - 1) * 4 = 60 octets.
 */
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
	struct uwb_ie_drp *drp_ie;

	drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
			UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
			GFP_KERNEL);
	if (drp_ie)
		drp_ie->hdr.element_id = UWB_IE_DRP;
	return drp_ie;
}


/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
			       struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
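			/*
			 * A zone with any MAS set is either merged into an
			 * existing allocation field (identical 16-bit MAS
			 * pattern) or recorded as a new field.
			 */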
			bool found = false;
			current_bmp = (__le16) *tmp_mas_bm;
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found) {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
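		/* Shift the next zone's 16 MAS down into the low bits. */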
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}

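	/*
	 * The IE length counts the octets after the 2-octet IE header:
	 * the fixed DRP Control and DevAddr fields plus one 4-octet
	 * field per allocation.
	 */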
	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}

/**
 * uwb_drp_ie_update - update a reservation's DRP IE
 * @rsv: the reservation
 */
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
	struct uwb_ie_drp *drp_ie;
	struct uwb_rsv_move *mv;
	int unsafe;

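	/* A reservation with no state needs no DRP IE; free any old one. */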
	if (rsv->state == UWB_RSV_STATE_NONE) {
		kfree(rsv->drp_ie);
		rsv->drp_ie = NULL;
		return 0;
	}

	unsafe = rsv->mas.unsafe ? 1 : 0;

	if (rsv->drp_ie == NULL) {
		rsv->drp_ie = uwb_drp_ie_alloc();
		if (rsv->drp_ie == NULL)
			return -ENOMEM;
	}
	drp_ie = rsv->drp_ie;

	uwb_ie_drp_set_unsafe(drp_ie,       unsafe);
	uwb_ie_drp_set_tiebreaker(drp_ie,   rsv->tiebreaker);
	uwb_ie_drp_set_owner(drp_ie,        uwb_rsv_is_owner(rsv));
	uwb_ie_drp_set_status(drp_ie,       uwb_rsv_status(rsv));
	uwb_ie_drp_set_reason_code(drp_ie,  uwb_rsv_reason_code(rsv));
	uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
	uwb_ie_drp_set_type(drp_ie,         rsv->type);

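	/*
	 * The DevAddr field carries the target's address when we own the
	 * reservation, and the owner's address when we are the target.
	 */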
	if (uwb_rsv_is_owner(rsv)) {
		switch (rsv->target.type) {
		case UWB_RSV_TARGET_DEV:
			drp_ie->dev_addr = rsv->target.dev->dev_addr;
			break;
		case UWB_RSV_TARGET_DEVADDR:
			drp_ie->dev_addr = rsv->target.devaddr;
			break;
		}
	} else {
		drp_ie->dev_addr = rsv->owner->dev_addr;
	}

	uwb_drp_ie_from_bm(drp_ie, &rsv->mas);

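	/*
	 * A reservation that is being moved or expanded advertises a second
	 * (companion) DRP IE describing the additional MAS.
	 */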
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (mv->companion_drp_ie == NULL) {
			mv->companion_drp_ie = uwb_drp_ie_alloc();
			if (mv->companion_drp_ie == NULL)
				return -ENOMEM;
		}
		drp_ie = mv->companion_drp_ie;

		/* Keep the same configuration as the main DRP IE. */
		memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));

		/* FIXME: handle properly the unsafe bit */
		uwb_ie_drp_set_unsafe(drp_ie,       1);
		uwb_ie_drp_set_status(drp_ie,       uwb_rsv_companion_status(rsv));
		uwb_ie_drp_set_reason_code(drp_ie,  uwb_rsv_companion_reason_code(rsv));

		uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
	}

	rsv->ie_valid = true;
	return 0;
}

/*
 * Set MAS bits from a given MAS bitmap in a single zone of a large bitmap.
 *
 * We are given a zone id and the MAS bitmap of bits that need to be set in
 * this zone. Note that this zone may already have bits set and this only
 * adds to them - we cannot simply assign the MAS bitmap contents to the
 * zone contents. We iterate over the bits (MAS) in the zone and set the
 * bits that are set in the given MAS bitmap.
 */
static
void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
{
	int mas;
	u16 mas_mask;

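	/* MAS number within the superframe is zone * UWB_MAS_PER_ZONE + mas. */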
	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
		mas_mask = 1 << mas;
		if (mas_bm & mas_mask)
			set_bit(zone * UWB_MAS_PER_ZONE + mas, bm->bm);
	}
}

/**
 * uwb_drp_ie_to_bm - convert DRP allocation fields to a bitmap
 * @bm:     MAS bitmap that will be populated to correspond to the
 *          allocation fields in the DRP IE
 * @drp_ie: the DRP IE that contains the allocation fields.
 *
 * The input format is an array of MAS allocation fields (16 bit Zone
 * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
 * 16.8.6. The output is a full 256 bit MAS bitmap.
 *
 * We go over all the allocation fields; for each allocation field we
 * know which zones are impacted. We iterate over all the impacted
 * zones and call a function that sets the correct MAS bits in each
 * zone.
 */
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
{
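	/*
	 * hdr.length excludes the 2-octet IE header; subtract the 4 fixed
	 * octets (DRP Control and DevAddr) and divide by the 4-octet
	 * allocation field size to get the number of allocation fields.
	 */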
	int numallocs = (drp_ie->hdr.length - 4) / 4;
	const struct uwb_drp_alloc *alloc;
	int cnt;
	u16 zone_bm, mas_bm;
	u8 zone;
	u16 zone_mask;

	bitmap_zero(bm->bm, UWB_NUM_MAS);

	for (cnt = 0; cnt < numallocs; cnt++) {
		alloc = &drp_ie->allocs[cnt];
		zone_bm = le16_to_cpu(alloc->zone_bm);
		mas_bm = le16_to_cpu(alloc->mas_bm);
		for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
			zone_mask = 1 << zone;
			if (zone_bm & zone_mask)
				uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
		}
	}
}