/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg);

/** \addtogroup osc
 *  @{
 */

/*
 * osc_page_protected() is compiled out because it may sleep while holding
 * the client_obd_list_lock, a spinlock under which sleeping is not
 * allowed. The offending call chain is:
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
			       const struct osc_page *opg,
			       enum cl_lock_mode mode, int pending, int unref)
{
	struct cl_page	       *page;
	struct osc_object      *obj;
	struct osc_thread_info *info;
	struct ldlm_res_id     *resname;
	struct lustre_handle   *lockh;
	ldlm_policy_data_t     *policy;
	ldlm_mode_t		dlmmode;
	__u64			flags;

	might_sleep();

	info = osc_env_info(env);
	resname = &info->oti_resname;
	policy = &info->oti_policy;
	lockh = &info->oti_handle;
	page = opg->ops_cl.cpl_page;
	obj = cl2osc(opg->ops_cl.cpl_obj);

	flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
	if (pending)
		flags |= LDLM_FL_CBPENDING;

	dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
	osc_lock_build_res(env, obj, resname);
	osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
	return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
			      dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	struct cl_object_header *hdr;
	struct cl_lock		*scan;
	struct cl_page		*page;
	struct cl_lock_descr	*descr;
	int result;

	LINVRNT(!opg->ops_temp);

	page = opg->ops_cl.cpl_page;
	if (page->cp_owner != NULL &&
	    cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
		/*
		 * If IO is done without locks (liblustre, or lloop), a lock
		 * is not required.
		 */
		result = 1;
	else
		/* otherwise check for a DLM lock */
		result = osc_page_is_dlocked(env, opg, mode, 1, unref);
	if (result == 0) {
		/* maybe this page is a part of a lockless io? */
		hdr = cl_object_header(opg->ops_cl.cpl_obj);
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_mode = mode;
		descr->cld_start = page->cp_index;
		descr->cld_end   = page->cp_index;
		spin_lock(&hdr->coh_lock_guard);
		list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
			/*
			 * Lock-less sub-lock has to be either in HELD state
			 * (when io is actively going on), or in CACHED state,
			 * when top-lock is being unlocked:
			 * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
			 */
			if ((scan->cll_state == CLS_HELD ||
			     scan->cll_state == CLS_CACHED) &&
			    cl_lock_ext_match(&scan->cll_descr, descr)) {
				struct osc_lock *olck;

				olck = osc_lock_at(scan);
				result = osc_lock_is_lockless(olck);
				break;
			}
		}
		spin_unlock(&hdr->coh_lock_guard);
	}
	return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);

	CDEBUG(D_TRACE, "%p\n", opg);
	LASSERT(opg->ops_lock == NULL);
}

static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	LASSERT(!opg->ops_transfer_pinned);
	cl_page_get(page);
	lu_ref_add_atomic(&page->cp_reference, label, page);
	opg->ops_transfer_pinned = 1;
}

static void osc_page_transfer_put(const struct lu_env *env,
				  struct osc_page *opg)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	if (opg->ops_transfer_pinned) {
		lu_ref_del(&page->cp_reference, "transfer", page);
		opg->ops_transfer_pinned = 0;
		cl_page_put(env, page);
	}
}

/**
 * Called once for every page when it is submitted for a transfer, either
 * opportunistically (osc_page_cache_add()) or immediately
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
				  struct osc_page *opg, enum cl_req_type crt)
{
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	/* ops_lru and ops_inflight share the same field, so take it from
	 * the LRU first and then reuse it as the inflight linkage. */
	osc_lru_del(osc_cli(obj), opg, false);

	spin_lock(&obj->oo_seatbelt);
	list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
	opg->ops_submitter = current;
	spin_unlock(&obj->oo_seatbelt);
}

static int osc_page_cache_add(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *io)
{
	struct osc_io   *oio = osc_env_io(env);
	struct osc_page *opg = cl2osc_page(slice);
	int result;

	LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

	osc_page_transfer_get(opg, "transfer\0cache");
	result = osc_queue_async_io(env, io, opg);
	if (result != 0)
		osc_page_transfer_put(env, opg);
	else
		osc_page_transfer_add(env, opg, CRT_WRITE);
	/* For a sync write, the kernel will wait for this page to be flushed
	 * before osc_io_end() is called, so release the extent earlier.
	 * For mkwrite(), it is known that there are no further pages. */
	if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
		if (oio->oi_active != NULL) {
			osc_extent_release(env, oio->oi_active);
			oio->oi_active = NULL;
		}
	}

	return result;
}

void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
		      pgoff_t start, pgoff_t end)
{
	memset(policy, 0, sizeof(*policy));
	policy->l_extent.start = cl_offset(obj, start);
	policy->l_extent.end   = cl_offset(obj, end + 1) - 1;
}
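
/*
 * Worked example (illustrative only; assumes cl_offset() is a plain page
 * shift with PAGE_CACHE_SIZE == 4096): for start == 2 and end == 2 the
 * code above yields
 *
 *	l_extent.start = 2 << 12       = 8192
 *	l_extent.end   = (3 << 12) - 1 = 12287
 *
 * i.e. the inclusive byte range [8192, 12287] covering exactly one page.
 */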

static int osc_page_addref_lock(const struct lu_env *env,
				struct osc_page *opg,
				struct cl_lock *lock)
{
	struct osc_lock *olock;
	int rc;

	LASSERT(opg->ops_lock == NULL);

	olock = osc_lock_at(lock);
	if (atomic_inc_return(&olock->ols_pageref) <= 0) {
		atomic_dec(&olock->ols_pageref);
		rc = -ENODATA;
	} else {
		cl_lock_get(lock);
		opg->ops_lock = lock;
		rc = 0;
	}
	return rc;
}

static void osc_page_putref_lock(const struct lu_env *env,
				 struct osc_page *opg)
{
	struct cl_lock  *lock = opg->ops_lock;
	struct osc_lock *olock;

	LASSERT(lock != NULL);
	olock = osc_lock_at(lock);

	atomic_dec(&olock->ols_pageref);
	opg->ops_lock = NULL;

	cl_lock_put(env, lock);
}

static int osc_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused)
{
	struct cl_lock *lock;
	int result = -ENODATA;

	lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
			       NULL, 1, 0);
	if (lock != NULL) {
		if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
			result = -EBUSY;
		cl_lock_put(env, lock);
	}
	return result;
}

static void osc_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);

	if (unlikely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
}

static void osc_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct osc_page   *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	if (likely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
	osc_lru_add(osc_cli(obj), opg);
}

static void osc_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct osc_page   *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(slice->cpl_obj);

	osc_lru_add(osc_cli(obj), opg);
}

static int osc_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice,
			 struct cl_io *unused)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

static const char *osc_list(struct list_head *head)
{
	return list_empty(head) ? "-" : "+";
}

static inline unsigned long osc_submit_duration(struct osc_page *opg)
{
	if (opg->ops_submit_time == 0)
		return 0;

	return (cfs_time_current() - opg->ops_submit_time);
}

static int osc_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct osc_page       *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object     *obj = cl2osc(slice->cpl_obj);
	struct client_obd     *cli = &osc_export(obj)->exp_obd->u.cli;

	return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
			  opg,
			  /* 1 */
			  oap->oap_magic, oap->oap_cmd,
			  oap->oap_interrupted,
			  osc_list(&oap->oap_pending_item),
			  osc_list(&oap->oap_rpc_item),
			  /* 2 */
			  oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
			  oap->oap_async_flags, oap->oap_brw_flags,
			  oap->oap_request, oap->oap_cli, obj,
			  /* 3 */
			  osc_list(&opg->ops_inflight),
			  opg->ops_submitter, opg->ops_transfer_pinned,
			  osc_submit_duration(opg), opg->ops_srvlock,
			  /* 4 */
			  cli->cl_r_in_flight, cli->cl_w_in_flight,
			  cli->cl_max_rpcs_in_flight,
			  cli->cl_avail_grant,
			  osc_list(&cli->cl_cache_waiters),
			  osc_list(&cli->cl_loi_ready_list),
			  osc_list(&cli->cl_loi_hp_ready_list),
			  osc_list(&cli->cl_loi_write_list),
			  osc_list(&cli->cl_loi_read_list),
			  /* 5 */
			  osc_list(&obj->oo_ready_item),
			  osc_list(&obj->oo_hp_ready_item),
			  osc_list(&obj->oo_write_item),
			  osc_list(&obj->oo_read_item),
			  atomic_read(&obj->oo_nr_reads),
			  osc_list(&obj->oo_reading_exts),
			  atomic_read(&obj->oo_nr_writes),
			  osc_list(&obj->oo_hp_exts),
			  osc_list(&obj->oo_urgent_exts));
}

static void osc_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct osc_page   *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	int rc;

	LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

	CDEBUG(D_TRACE, "%p\n", opg);
	osc_page_transfer_put(env, opg);
	rc = osc_teardown_async_page(env, obj, opg);
	if (rc) {
		CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
			      "Trying to teardown failed: %d\n", rc);
		LASSERT(0);
	}

	spin_lock(&obj->oo_seatbelt);
	if (opg->ops_submitter != NULL) {
		LASSERT(!list_empty(&opg->ops_inflight));
		list_del_init(&opg->ops_inflight);
		opg->ops_submitter = NULL;
	}
	spin_unlock(&obj->oo_seatbelt);

	osc_lru_del(osc_cli(obj), opg, true);
}

void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
		   int from, int to)
{
	struct osc_page       *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	opg->ops_from = from;
	opg->ops_to   = to;
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags |= ASYNC_COUNT_STABLE;
	spin_unlock(&oap->oap_lock);
}

static int osc_page_cancel(const struct lu_env *env,
			   const struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc = 0;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	/* Check if the transfer against this page has completed, or was
	 * never queued in the first place. */
	if (opg->ops_transfer_pinned)
		/* FIXME: may not be interrupted.. */
		rc = osc_cancel_async_page(env, opg);
	LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
	return rc;
}

static int osc_page_flush(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc;

	rc = osc_flush_async_page(env, io, opg);
	return rc;
}

static const struct cl_page_operations osc_page_ops = {
	.cpo_fini          = osc_page_fini,
	.cpo_print         = osc_page_print,
	.cpo_delete        = osc_page_delete,
	.cpo_is_under_lock = osc_page_is_under_lock,
	.cpo_disown        = osc_page_disown,
	.io = {
		[CRT_READ] = {
			.cpo_cache_add  = osc_page_fail,
			.cpo_completion = osc_page_completion_read
		},
		[CRT_WRITE] = {
			.cpo_cache_add  = osc_page_cache_add,
			.cpo_completion = osc_page_completion_write
		}
	},
	.cpo_clip          = osc_page_clip,
	.cpo_cancel        = osc_page_cancel,
	.cpo_flush         = osc_page_flush
};

int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	struct osc_object *osc = cl2osc(obj);
	struct osc_page   *opg = cl_object_page_slice(obj, page);
	int result;

	opg->ops_from = 0;
	opg->ops_to   = PAGE_CACHE_SIZE;

	result = osc_prep_async_page(osc, opg, vmpage,
				     cl_offset(obj, page->cp_index));
	if (result == 0) {
		struct osc_io *oio = osc_env_io(env);

		opg->ops_srvlock = osc_io_srvlock(oio);
		cl_page_slice_add(page, &opg->ops_cl, obj,
				  &osc_page_ops);
	}
	/*
	 * Cannot assert osc_page_protected() here as read-ahead
	 * creates temporary pages outside of a lock.
	 */
	/* ops_inflight and ops_lru are the same field, but it doesn't
	 * hurt to initialize it twice :-) */
	INIT_LIST_HEAD(&opg->ops_inflight);
	INIT_LIST_HEAD(&opg->ops_lru);

	/* reserve an LRU slot for this page */
	if (page->cp_type == CPT_CACHEABLE && result == 0)
		result = osc_lru_reserve(env, osc, opg);

	return result;
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags)
{
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object     *obj = oap->oap_obj;

	LINVRNT(osc_page_protected(env, opg,
				   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

	LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
		 oap, oap->oap_magic);
	LASSERT(oap->oap_async_flags & ASYNC_READY);
	LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

	oap->oap_cmd       = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	oap->oap_page_off  = opg->ops_from;
	oap->oap_count     = opg->ops_to - opg->ops_from;
	oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;

	if (!client_is_remote(osc_export(obj)) &&
	    capable(CFS_CAP_SYS_RESOURCE)) {
		oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
		oap->oap_cmd |= OBD_BRW_NOQUOTA;
	}

	opg->ops_submit_time = cfs_time_current();
	osc_page_transfer_get(opg, "transfer\0imm");
	osc_page_transfer_add(env, opg, crt);
}

/* --------------- LRU page management ------------------ */

/* OSC is a natural place to manage LRU pages, as applications tend to
 * write out data one OSC at a time. Ideally, an OSC that is used more
 * frequently should occupy more LRU slots. On the other hand, we should
 * avoid using up all LRU slots (client_obd::cl_lru_left), because
 * processes would then have to sleep waiting for a free slot - which
 * would be very bad. The algorithm therefore requires each OSC to free
 * slots voluntarily, so that a reasonable number of free slots is
 * maintained at all times.
 */
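
/*
 * Illustration with made-up numbers (not taken from the code): if the
 * client-wide budget is cl_cache->ccc_lru_max == 262144 slots (1GB of 4KB
 * pages) shared by ccc_users == 8 OSCs, the "fair" per-OSC share is
 * 262144 / 8 == 32768 slots; osc_cache_too_much() below uses this share
 * as the threshold above which an OSC starts giving slots back.
 */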

static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batches. An OSC should free at least this many
 * pages per batch to avoid running out of LRU budget... */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
/* ...and at most this many, so a single shrink pass does not take too
 * long to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
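/* For reference, with 4KB pages (PAGE_CACHE_SHIFT == 12) the shifts above
 * give lru_shrink_min = 2 << 8 = 512 pages (2MB) and lru_shrink_max =
 * 32 << 8 = 8192 pages (32MB); larger pages lower the page counts while
 * the byte totals stay at 2MB/32MB. */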

/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
 * we should free slots aggressively. In this way, slots are freed at a
 * steady rate and fairness among OSCs is maintained.
 *
 * Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int pages = atomic_read(&cli->cl_lru_in_list) >> 1;

	if (atomic_read(&osc_lru_waiters) > 0 &&
	    atomic_read(cli->cl_lru_left) < lru_shrink_max)
		/* drop LRU pages aggressively */
		return min(pages, lru_shrink_max);

	/* if we are about to run out of LRU slots, free some, but not too
	 * many, to maintain fairness among OSCs. */
	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
		unsigned long tmp;

		tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
		if (pages > tmp)
			return min(pages, lru_shrink_max);

		return pages > lru_shrink_min ? lru_shrink_min : 0;
	}

	return 0;
}
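
/*
 * A hypothetical walk-through of osc_cache_too_much() (numbers invented
 * for illustration): with cl_lru_in_list == 10000 (so pages == 5000),
 * cl_lru_left == 1000, ccc_lru_max == 262144 (low-water mark 262144 >> 4
 * == 16384), ccc_users == 8 (per-OSC share 32768) and no waiters: the
 * first test fails, the second succeeds, and since pages (5000) does not
 * exceed the share (32768), only lru_shrink_min pages are requested -
 * shrinking stays gradual.
 */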

/* Return how many pages in @pvec were NOT discarded. */
static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
			   struct cl_page **pvec, int max_index)
{
	int count;
	int i;

	for (count = 0, i = 0; i < max_index; i++) {
		struct cl_page *page = pvec[i];

		if (cl_page_own_try(env, io, page) == 0) {
			/* free an LRU page only if nobody is using it.
			 * This check is necessary to avoid freeing pages
			 * that have already been removed from the LRU and
			 * pinned for IO. */
			if (!cl_page_in_use(page)) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				++count;
			}
			cl_page_disown(env, io, page);
		}
		cl_page_put(env, page);
		pvec[i] = NULL;
	}
	return max_index - count;
}
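
/*
 * Note on the bookkeeping in osc_lru_shrink() below: the caller increments
 * "count" once per page queued in pvec[], then subtracts the value
 * returned here ("count -= discard_pagevec(...)"), so afterwards "count"
 * holds the number of pages actually discarded rather than merely queued.
 */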

/**
 * Drop at most @target pages from the LRU.
 */
int osc_lru_shrink(struct client_obd *cli, int target)
{
	struct cl_env_nest nest;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_object *clobj = NULL;
	struct cl_page **pvec;
	struct osc_page *opg;
	int maxscan = 0;
	int count = 0;
	int index = 0;
	int rc = 0;

	LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
	if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
		return 0;

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env))
		return PTR_ERR(env);

	pvec = osc_env_info(env)->oti_pvec;
	io = &osc_env_info(env)->oti_io;

	client_obd_list_lock(&cli->cl_lru_list_lock);
	atomic_inc(&cli->cl_lru_shrinkers);
	maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
	while (!list_empty(&cli->cl_lru_list)) {
		struct cl_page *page;

		if (--maxscan < 0)
			break;

		opg = list_entry(cli->cl_lru_list.next, struct osc_page,
				 ops_lru);
		page = cl_page_top(opg->ops_cl.cpl_page);
		if (cl_page_in_use_noref(page)) {
			list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
			continue;
		}

		LASSERT(page->cp_obj != NULL);
		if (clobj != page->cp_obj) {
			struct cl_object *tmp = page->cp_obj;

			cl_object_get(tmp);
			client_obd_list_unlock(&cli->cl_lru_list_lock);

			if (clobj != NULL) {
				count -= discard_pagevec(env, io, pvec, index);
				index = 0;

				cl_io_fini(env, io);
				cl_object_put(env, clobj);
				clobj = NULL;
			}

			clobj = tmp;
			io->ci_obj = clobj;
			io->ci_ignore_layout = 1;
			rc = cl_io_init(env, io, CIT_MISC, clobj);

			client_obd_list_lock(&cli->cl_lru_list_lock);

			if (rc != 0)
				break;

			++maxscan;
			continue;
		}

		/* move this page to the end of the list as it will be
		 * discarded soon. The page will finally be removed from the
		 * LRU list in osc_page_delete(). */
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);

		/* it is okay to grab a reference here without holding the
		 * lock, because anyone deleting the page must first take
		 * cl_lru_list_lock. */
		cl_page_get(page);
		pvec[index++] = page;
		if (++count >= target)
			break;

		if (unlikely(index == OTI_PVEC_SIZE)) {
			client_obd_list_unlock(&cli->cl_lru_list_lock);
			count -= discard_pagevec(env, io, pvec, index);
			index = 0;

			client_obd_list_lock(&cli->cl_lru_list_lock);
		}
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (clobj != NULL) {
		count -= discard_pagevec(env, io, pvec, index);

		cl_io_fini(env, io);
		cl_object_put(env, clobj);
	}
	cl_env_nested_put(&nest, env);

	atomic_dec(&cli->cl_lru_shrinkers);
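	/* report the number of pages actually discarded; if none were,
	 * fall back to rc, which is 0 or a cl_io_init() error from the
	 * scan above */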
	return count > 0 ? count : rc;
}

static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
{
	bool wakeup = false;

	if (!opg->ops_in_lru)
		return;

	atomic_dec(&cli->cl_lru_busy);
	client_obd_list_lock(&cli->cl_lru_list_lock);
	if (list_empty(&opg->ops_lru)) {
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
		atomic_inc_return(&cli->cl_lru_in_list);
		wakeup = atomic_read(&osc_lru_waiters) > 0;
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (wakeup) {
		osc_lru_shrink(cli, osc_cache_too_much(cli));
		wake_up_all(&osc_lru_waitq);
	}
}

/* Delete a page from the LRU list. A page can leave the LRU list for two
 * reasons: it was redirtied, or it was removed from the page cache. */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
	if (opg->ops_in_lru) {
		client_obd_list_lock(&cli->cl_lru_list_lock);
		if (!list_empty(&opg->ops_lru)) {
			LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
			list_del_init(&opg->ops_lru);
			atomic_dec(&cli->cl_lru_in_list);
			if (!del)
				atomic_inc(&cli->cl_lru_busy);
		} else if (del) {
			LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
			atomic_dec(&cli->cl_lru_busy);
		}
		client_obd_list_unlock(&cli->cl_lru_list_lock);
		if (del) {
			atomic_inc(cli->cl_lru_left);
			/* this is a good place to release more LRU pages if
			 * this OSC occupies too many LRU slots and the kernel
			 * is stealing one of them.
			 * cl_lru_shrinkers avoids a recursive call in case
			 * we are already in the context of osc_lru_shrink(). */
			if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
			    !memory_pressure_get())
				osc_lru_shrink(cli, osc_cache_too_much(cli));
			wake_up(&osc_lru_waitq);
		}
	} else {
		LASSERT(list_empty(&opg->ops_lru));
	}
}

static inline int max_to_shrink(struct client_obd *cli)
{
	return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}

static int osc_lru_reclaim(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int max_scans;
	int rc;

	LASSERT(cache != NULL);
	LASSERT(!list_empty(&cache->ccc_lru));

	rc = osc_lru_shrink(cli, lru_shrink_min);
	if (rc != 0) {
		CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
		       cli->cl_import->imp_obd->obd_name, rc, cli);
		return rc;
	}

	CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
	       cli->cl_import->imp_obd->obd_name, cli,
	       atomic_read(&cli->cl_lru_in_list),
	       atomic_read(&cli->cl_lru_busy));

	/* Reclaim LRU slots from other client_obds, since this one cannot
	 * free enough from its own. This should rarely happen. */
	spin_lock(&cache->ccc_lru_lock);
	cache->ccc_lru_shrinkers++;
	list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);

	max_scans = atomic_read(&cache->ccc_users);
	while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
		cli = list_entry(cache->ccc_lru.next, struct client_obd,
				 cl_lru_osc);

		CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
		       cli->cl_import->imp_obd->obd_name, cli,
		       atomic_read(&cli->cl_lru_in_list),
		       atomic_read(&cli->cl_lru_busy));

		list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
		if (atomic_read(&cli->cl_lru_in_list) > 0) {
			spin_unlock(&cache->ccc_lru_lock);

			rc = osc_lru_shrink(cli, max_to_shrink(cli));
			spin_lock(&cache->ccc_lru_lock);
			if (rc != 0)
				break;
		}
	}
	spin_unlock(&cache->ccc_lru_lock);

	CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
	       cli->cl_import->imp_obd->obd_name, cli, rc);
	return rc;
}

static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg)
{
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	struct client_obd *cli = osc_cli(obj);
	int rc = 0;

	if (cli->cl_cache == NULL) /* shall not be in LRU */
		return 0;

	LASSERT(atomic_read(cli->cl_lru_left) >= 0);
	while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
		int gen;

		/* we have run out of LRU slots; try to free some ourselves */
		rc = osc_lru_reclaim(cli);
		if (rc < 0)
			break;
		if (rc > 0)
			continue;

		cond_resched();

		/* slowest case: all cached pages are busy, so notify the
		 * other OSCs that we are short of LRU slots. */
		atomic_inc(&osc_lru_waiters);

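		/* Sleep until either a free slot appears, or the LRU page
		 * count changes from the snapshot taken in "gen" below -
		 * the latter means other threads added or freed LRU pages,
		 * so another reclaim attempt may now succeed. */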
		gen = atomic_read(&cli->cl_lru_in_list);
		rc = l_wait_event(osc_lru_waitq,
				  atomic_read(cli->cl_lru_left) > 0 ||
				  (atomic_read(&cli->cl_lru_in_list) > 0 &&
				   gen != atomic_read(&cli->cl_lru_in_list)),
				  &lwi);

		atomic_dec(&osc_lru_waiters);
		if (rc < 0)
			break;
	}

	if (rc >= 0) {
		atomic_inc(&cli->cl_lru_busy);
		opg->ops_in_lru = 1;
		rc = 0;
	}

	return rc;
}

/** @} osc */