/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 10
/* Run GC every X seconds */
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

/* Number of 512-byte sectors per exposed 4096-byte page: 4096 / 512 = 8 */
#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)

struct rrpc_inflight {
	struct list_head reqs;		/* in-flight logical address ranges */
	spinlock_t lock;
};

/* A single in-flight request, covering logical addresses [l_start, l_end] */
struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

/* Per-request private data, embedded as the PDU of a struct nvm_rq */
struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	struct rrpc_addr *addr;
	unsigned long flags;
};

struct rrpc_block {
	struct nvm_block *parent;
	struct list_head prio;

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	spinlock_t lock;
	atomic_t data_cmnt_size; /* data pages committed to stable storage */
};

struct rrpc_lun {
	struct rrpc *rrpc;
	struct nvm_lun *parent;
	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */
	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct work_struct ws_gc;

	spinlock_t lock;
};

struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_dev *dev;
	struct gendisk *disk;

	u64 poffset; /* physical page offset */
	int lun_offset;

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_pages;
	unsigned long total_blocks;

	/* Write strategy variables. Move these into a separate structure for
	 * each strategy.
	 */
	atomic_t next_lun; /* Whenever a page is written, this is updated
			    * to point to the next write lun
			    */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * Logical addresses are known by the host system, while physical
	 * addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};
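
/*
 * Sketch of the round-robin write strategy implied by next_lun above.
 * Illustrative only; the function name is an assumption and the actual
 * lun selection lives in the rrpc core, not in this header.
 */
static inline struct rrpc_lun *rrpc_next_write_lun(struct rrpc *rrpc)
{
	/* advance the round-robin cursor atomically and wrap around */
	int next = atomic_inc_return(&rrpc->next_lun);

	return &rrpc->luns[next % rrpc->nr_luns];
}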

struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
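
/*
 * Sketch of how the forward and reverse maps are kept consistent on a
 * write. Illustrative only; the helper name and the exact locking are
 * assumptions based on the trans_map/rev_trans_map/rev_lock fields above.
 */
static inline void rrpc_example_update_map(struct rrpc *rrpc, sector_t laddr,
					   u64 paddr, struct rrpc_block *rblk)
{
	struct rrpc_addr *gp = &rrpc->trans_map[laddr];

	spin_lock(&rrpc->rev_lock);
	gp->addr = paddr;
	gp->rblk = rblk;
	/* reverse entries are offset by the device's physical page offset */
	rrpc->rev_trans_map[paddr - rrpc->poffset].addr = laddr;
	spin_unlock(&rrpc->rev_lock);
}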

static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
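
/*
 * Example: a bio with bi_iter.bi_sector == 80 and bi_iter.bi_size == 8192
 * starts at laddr 80 / 8 = 10 and spans 8192 / 4096 = 2 logical pages;
 * rrpc_get_sector(10) maps back to sector 10 * 8 = 80.
 */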

static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	/* Two inclusive ranges overlap iff each starts at or before the
	 * other ends. Checking both endpoints against [l_start, l_end]
	 * would miss requests that strictly contain an existing range.
	 */
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
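
/*
 * Example: an in-flight range [4, 9] intersects a new request [2, 5]
 * (5 >= 4 and 2 <= 9), but not [10, 12] (10 > 9 fails the second test).
 */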

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				 unsigned pages,
				 struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_pages);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
							struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
						struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_pages;

	BUG_ON((r->l_start + pages) > rrpc->nr_pages);

	rrpc_unlock_laddr(rrpc, r);
}
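
/*
 * Typical caller pattern (illustrative, not part of this header):
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd)) {
 *		... overlapping request in flight: requeue bio, retry later
 *	}
 *	... submit rqd; on completion:
 *	rrpc_unlock_rq(rrpc, rqd);
 */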

#endif /* RRPC_H_ */