/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

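/*
 * Each free-list lane owns a pair of log slots on media; LOG_NEW_ENT and
 * LOG_OLD_ENT tell btt_log_read which member of the pair to return.
 */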
enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static int btt_major;

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	WARN_ON(!super);
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}

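/*
 * On-media map entry states, as decoded below from the two flag bits
 * (ze = (Z << 1) + E):
 *   Z=0, E=0: initial state - the entry was never written, so reads
 *             return an identity (postmap == premap) mapping
 *   Z=0, E=1: the error flag is set for this block
 *   Z=1, E=0: the zero/trim flag is set for this block
 *   Z=1, E=1: a normal, valid mapping (MAP_ENT_NORMAL)
 */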
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

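/*
 * The two log slots of a lane are adjacent on media, so a single read of
 * 2 * LOG_ENT_SIZE bytes at the lane's offset fetches the whole pair.
 */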
static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	WARN_ON(!ent);
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If, for some reason, the parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If the pair has never been written, it also assigns the first
 * sequence number to sub-slot 0.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

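	/*
	 * Valid sequence numbers cycle 1 -> 2 -> 3 -> 1; a zero only ever
	 * appears in an uninitialized slot. For a valid pair the two values
	 * differ and sum to at most 5 (2 + 3). A difference of one means
	 * the smaller value is older; a difference of two means the
	 * sequence wrapped, making the larger value the older one.
	 */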
	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, le32_to_cpu(log[0].seq),
			le32_to_cpu(log[1].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media.
 * It does _not_ prepare the freelist entry for the next write;
 * btt_flog_write is the wrapper that also updates the freelist elements.
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that that is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
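	/* seq cycles 1 -> 2 -> 3 -> 1; 0 is reserved for 'uninitialized' */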
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
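	/* the block displaced by this write becomes the lane's next free block */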
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries that mark
 * the initial set of reserved blocks as free
 */
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry log, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

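	/*
	 * Each lane gets a 'fake' entry in sub-slot 0 whose old_map and
	 * new_map both point at one of the nfree reserved internal blocks
	 * (internal LBAs external_nlba through external_nlba + nfree - 1).
	 * Sub-slot 1 is zeroed, so sub-slot 0 is picked as the current entry.
	 */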
	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
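		/*
		 * The flog recorded a transition, so the map must show either
		 * the old or the new postmap. If it still shows the old one,
		 * the last transaction stopped between the flog commit and
		 * the map update, and the map write is replayed below.
		 */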
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
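	/*
	 * Each internal block costs internal_lbasize bytes of data area
	 * plus MAP_ENT_SIZE bytes of map; one extra page is held back,
	 * presumably to absorb the map's rounding to a page boundary.
	 */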
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
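	/* nfree blocks are held back as the internal free/write pool */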
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function parses an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

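/*
 * Any portion of the external LBA size beyond the advertised sector size
 * is exposed to the block layer as per-sector integrity metadata
 * (see btt_rw_integrity).
 */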
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
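/*
 * A premap LBA is mapped to one of nfree locks by the cache line its map
 * entry occupies, so map entries sharing a cache line also share a lock.
 */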
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

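			/*
			 * Publish the postmap block in this lane's RTT slot;
			 * writers poll the RTT and spin while their next free
			 * block matches a valid entry (see btt_write_pg).
			 */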
			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

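		/*
		 * Writes never touch live data in place: the new data lands
		 * in this lane's free block, and the premap->postmap entry is
		 * flipped only after the data and the flog are on media.
		 */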
		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			int rw, sector_t sector)
{
	int ret;

	if (rw == READ) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached. If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		goto out;
	}

	do_acct = nd_iostat_start(bio, &start);
	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				rw, iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_error = err;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct btt *btt = bdev->bd_disk->private_data;

	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, 0);
	return 0;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
	btt->btt_disk->major = btt_major;
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	add_disk(btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region, used to acquire and release lanes for IO
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
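	/*
	 * Advertise 4K sectors when the lba size allows it, else 512B; any
	 * remainder of the lba size is exposed as integrity metadata
	 * (see btt_meta_size).
	 */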
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in discover_arenas: %d\n", ret);
		goto out_free;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		goto out_free;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			goto out_free;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			goto out_free;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		goto out_free;
	}

	btt_debugfs_init(btt);

	return btt;

 out_free:
	kfree(btt);
	return NULL;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
		kfree(btt);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
		return -ENODEV;

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE)
		return -ENXIO;
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc;

	btt_major = register_blkdev(0, "btt");
	if (btt_major < 0)
		return btt_major;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root)) {
		rc = -ENXIO;
		goto err_debugfs;
	}

	return 0;

 err_debugfs:
	unregister_blkdev(btt_major, "btt");

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
	unregister_blkdev(btt_major, "btt");
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);