/*
 * V9FS cache definitions.
 *
 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN  11

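/*
 * The netfs definition registered with FsCache for 9p; its primary index
 * is the parent under which per-session cookies are acquired.
 */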
struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 * @v9ses: session to assign the cache tag to
 *
 * The value of jiffies is used for a fairly random cache tag.
 */
static int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

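/*
 * Copy the session cache tag into @buffer as the key for the session index
 * cookie.  Returns the key length, or 0 if the tag does not fit in @bufmax.
 */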
static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct v9fs_session_info *v9ses;
	uint16_t klen = 0;

	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
		 v9ses, buffer, bufmax);

	if (v9ses->cachetag)
		klen = strlen(v9ses->cachetag);

	if (klen > bufmax)
		return 0;

	memcpy(buffer, v9ses->cachetag, klen);
	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
	return klen;
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= v9fs_cache_session_get_key,
};

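/*
 * Acquire a session index cookie under the 9p netfs primary index,
 * generating a random cache tag first if none was specified.
 */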
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			return;
		}
	}

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

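/*
 * Give up the session cookie; the data cached under it is not retired.
 */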
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, 0);
	v9ses->fscache = NULL;
}

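/* Use the 9p qid path as the per-inode cache key. */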
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
		 &v9inode->vfs_inode, v9inode->qid.path);
	return sizeof(v9inode->qid.path);
}

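/* Report the current inode size as the cache object's attribute. */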
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
				      uint64_t *size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	*size = i_size_read(&v9inode->vfs_inode);

	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
		 &v9inode->vfs_inode, *size);
}

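/* Store the 9p qid version as auxiliary data for later coherency checks. */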
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
					 void *buffer, uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
		 &v9inode->vfs_inode, v9inode->qid.version);
	return sizeof(v9inode->qid.version);
}

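/*
 * Compare the auxiliary data held by the cache with the current qid version
 * to decide whether the cached object is still usable.
 */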
static enum fscache_checkaux
v9fs_cache_inode_check_aux(void *cookie_netfs_data,
			   const void *buffer,
			   uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

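/*
 * Called when the cache withdraws an object: walk the inode's pagecache and
 * clear PG_fscache on all of its pages.
 */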
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
	struct v9fs_inode *v9inode = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	for (;;) {
		nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
					  first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= v9fs_cache_inode_get_key,
	.get_attr	= v9fs_cache_inode_get_attr,
	.get_aux	= v9fs_cache_inode_get_aux,
	.check_aux	= v9fs_cache_inode_check_aux,
	.now_uncached	= v9fs_cache_inode_now_uncached,
};

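/*
 * Acquire a data-file cookie for a regular file, parented on the session
 * cookie.  Non-regular files and inodes that already have a cookie are
 * left alone.
 */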
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode, true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

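/* Drop the inode's cookie, keeping any data stored for it in the cache. */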
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 0);
	v9inode->fscache = NULL;
}

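/* Drop the inode's cookie and retire (discard) the data cached for it. */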
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, 1);
	v9inode->fscache = NULL;
}

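/*
 * Decide whether to keep the cache cookie for a newly opened file: opens
 * for writing flush (retire) the cookie, read-only opens keep it.
 */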
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	spin_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	spin_unlock(&v9inode->fscache_lock);
}

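/*
 * Replace the inode's cookie: retire the existing one and acquire a fresh
 * cookie from the session index when the cached data needs revalidating.
 */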
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	spin_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, 1);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  v9inode, true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	spin_unlock(&v9inode->fscache_lock);
}

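/*
 * Ask FsCache to release its interest in @page so that it can be freed;
 * the @gfp flags govern whether the call is allowed to block.
 */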
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

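/*
 * Invalidate the page's cache state: wait for any pending write to the
 * cache to finish and then remove the page from the cookie.
 */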
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

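/*
 * Completion callback for reads from the cache: mark the page up to date
 * on success and unlock it in any case.
 */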
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 * @inode: inode owning the page
 * @page: page to be filled from the cache
 *
 * Returns 0 if the page is in the cache and a BIO has been submitted,
 * 1 if the page is not in the cache, and -error otherwise.
 */
int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 * @inode: inode owning the pages
 * @mapping: address space the pages belong to
 * @pages: list of pages to be read
 * @nr_pages: number of pages on the list
 *
 * Returns 0 if the pages are in the cache and BIOs have been submitted,
 * 1 if the pages are not in the cache, and -error otherwise.
 */
int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 * @inode: inode owning the page
 * @page: page that has just been read and should be stored in the cache
 */
void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret =  %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}