1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2012, Intel Corporation.
31 *
32 */
33 /*
34 * This file is part of Lustre, http://www.lustre.org/
35 * Lustre is a trademark of Sun Microsystems, Inc.
36 *
37 * osc cache management.
38 *
39 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
40 */
41
42 #define DEBUG_SUBSYSTEM S_OSC
43
44 #include "osc_cl_internal.h"
45 #include "osc_internal.h"
46
47 static int extent_debug; /* set to non-zero for more debugging */
48
49 static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
50 static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
51 int state);
52 static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
53 struct osc_async_page *oap, int sent, int rc);
54 static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
55 int cmd);
56 static int osc_refresh_count(const struct lu_env *env,
57 struct osc_async_page *oap, int cmd);
58 static int osc_io_unplug_async(const struct lu_env *env,
59 struct client_obd *cli, struct osc_object *osc);
60 static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
61 unsigned int lost_grant);
62
63 static void osc_extent_tree_dump0(int level, struct osc_object *obj,
64 const char *func, int line);
65 #define osc_extent_tree_dump(lvl, obj) \
66 osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)
67
68 /** \addtogroup osc
69 * @{
70 */
71
72 /* ------------------ osc extent ------------------ */
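/* ext_flags() encodes an extent's flag bits into a short string for debug
 * output: 'r'/'w' = read/write, 'i' = in rbtree, 's' = srvlock,
 * 'h' = high priority, 'u' = urgent, 'm' = memalloc, 't' = truncate pending,
 * 'Y' = fsync wait. */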
73 static inline char *ext_flags(struct osc_extent *ext, char *flags)
74 {
75 char *buf = flags;
76 *buf++ = ext->oe_rw ? 'r' : 'w';
77 if (ext->oe_intree)
78 *buf++ = 'i';
79 if (ext->oe_srvlock)
80 *buf++ = 's';
81 if (ext->oe_hp)
82 *buf++ = 'h';
83 if (ext->oe_urgent)
84 *buf++ = 'u';
85 if (ext->oe_memalloc)
86 *buf++ = 'm';
87 if (ext->oe_trunc_pending)
88 *buf++ = 't';
89 if (ext->oe_fsync_wait)
90 *buf++ = 'Y';
91 *buf = 0;
92 return flags;
93 }
94
95 static inline char list_empty_marker(struct list_head *list)
96 {
97 return list_empty(list) ? '-' : '+';
98 }
99
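/* EXTSTR/EXTPARA print an extent as "[oe_start -> oe_end/oe_max_end]",
 * all in units of page index. */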
100 #define EXTSTR "[%lu -> %lu/%lu]"
101 #define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
102 static const char *oes_strings[] = {
103 "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
104
105 #define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
106 struct osc_extent *__ext = (extent); \
107 char __buf[16]; \
108 \
109 CDEBUG(lvl, \
110 "extent %p@{" EXTSTR ", " \
111 "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
112 /* ----- extent part 0 ----- */ \
113 __ext, EXTPARA(__ext), \
114 /* ----- part 1 ----- */ \
115 atomic_read(&__ext->oe_refc), \
116 atomic_read(&__ext->oe_users), \
117 list_empty_marker(&__ext->oe_link), \
118 oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
119 __ext->oe_obj, \
120 /* ----- part 2 ----- */ \
121 __ext->oe_grants, __ext->oe_nr_pages, \
122 list_empty_marker(&__ext->oe_pages), \
123 waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
124 __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
125 /* ----- part 4 ----- */ \
126 ## __VA_ARGS__); \
127 } while (0)
128
129 #undef EASSERTF
130 #define EASSERTF(expr, ext, fmt, args...) do { \
131 if (!(expr)) { \
132 OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args); \
133 osc_extent_tree_dump(D_ERROR, (ext)->oe_obj); \
134 LASSERT(expr); \
135 } \
136 } while (0)
137
138 #undef EASSERT
139 #define EASSERT(expr, ext) EASSERTF(expr, ext, "\n")
140
141 static inline struct osc_extent *rb_extent(struct rb_node *n)
142 {
143 if (n == NULL)
144 return NULL;
145
146 return container_of(n, struct osc_extent, oe_node);
147 }
148
149 static inline struct osc_extent *next_extent(struct osc_extent *ext)
150 {
151 if (ext == NULL)
152 return NULL;
153
154 LASSERT(ext->oe_intree);
155 return rb_extent(rb_next(&ext->oe_node));
156 }
157
158 static inline struct osc_extent *prev_extent(struct osc_extent *ext)
159 {
160 if (ext == NULL)
161 return NULL;
162
163 LASSERT(ext->oe_intree);
164 return rb_extent(rb_prev(&ext->oe_node));
165 }
166
167 static inline struct osc_extent *first_extent(struct osc_object *obj)
168 {
169 return rb_extent(rb_first(&obj->oo_root));
170 }
171
172 /* object must be locked by caller. */
173 static int osc_extent_sanity_check0(struct osc_extent *ext,
174 const char *func, const int line)
175 {
176 struct osc_object *obj = ext->oe_obj;
177 struct osc_async_page *oap;
178 int page_count;
179 int rc = 0;
180
181 if (!osc_object_is_locked(obj)) {
182 rc = 9;
183 goto out;
184 }
185
186 if (ext->oe_state >= OES_STATE_MAX) {
187 rc = 10;
188 goto out;
189 }
190
191 if (atomic_read(&ext->oe_refc) <= 0) {
192 rc = 20;
193 goto out;
194 }
195
196 if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
197 rc = 30;
198 goto out;
199 }
200
201 switch (ext->oe_state) {
202 case OES_INV:
203 if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
204 rc = 35;
205 else
206 rc = 0;
207 goto out;
208 case OES_ACTIVE:
209 if (atomic_read(&ext->oe_users) == 0) {
210 rc = 40;
211 goto out;
212 }
213 if (ext->oe_hp) {
214 rc = 50;
215 goto out;
216 }
217 if (ext->oe_fsync_wait && !ext->oe_urgent) {
218 rc = 55;
219 goto out;
220 }
221 break;
222 case OES_CACHE:
223 if (ext->oe_grants == 0) {
224 rc = 60;
225 goto out;
226 }
227 if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
228 rc = 65;
229 goto out;
230 }
231 default:
232 if (atomic_read(&ext->oe_users) > 0) {
233 rc = 70;
234 goto out;
235 }
236 }
237
238 if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
239 rc = 80;
240 goto out;
241 }
242
243 if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
244 rc = 90;
245 goto out;
246 }
247
248 if (ext->oe_osclock) {
249 struct cl_lock_descr *descr;
250
251 descr = &ext->oe_osclock->cll_descr;
252 if (!(descr->cld_start <= ext->oe_start &&
253 descr->cld_end >= ext->oe_max_end)) {
254 rc = 100;
255 goto out;
256 }
257 }
258
259 if (ext->oe_nr_pages > ext->oe_mppr) {
260 rc = 105;
261 goto out;
262 }
263
264 /* Do not verify page list if extent is in RPC. This is because an
265 * in-RPC extent is supposed to be exclusively accessible w/o lock. */
266 if (ext->oe_state > OES_CACHE) {
267 rc = 0;
268 goto out;
269 }
270
271 if (!extent_debug) {
272 rc = 0;
273 goto out;
274 }
275
276 page_count = 0;
277 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
278 pgoff_t index = oap2cl_page(oap)->cp_index;
279 ++page_count;
280 if (index > ext->oe_end || index < ext->oe_start) {
281 rc = 110;
282 goto out;
283 }
284 }
285 if (page_count != ext->oe_nr_pages) {
286 rc = 120;
287 goto out;
288 }
289
290 out:
291 if (rc != 0)
292 OSC_EXTENT_DUMP(D_ERROR, ext,
293 "%s:%d sanity check %p failed with rc = %d\n",
294 func, line, ext, rc);
295 return rc;
296 }
297
298 #define sanity_check_nolock(ext) \
299 osc_extent_sanity_check0(ext, __func__, __LINE__)
300
301 #define sanity_check(ext) ({ \
302 int __res; \
303 osc_object_lock((ext)->oe_obj); \
304 __res = sanity_check_nolock(ext); \
305 osc_object_unlock((ext)->oe_obj); \
306 __res; \
307 })
308
309 /**
310 * Sanity check: make sure there are no overlapping extents in the tree.
311 */
312 static int osc_extent_is_overlapped(struct osc_object *obj,
313 struct osc_extent *ext)
314 {
315 struct osc_extent *tmp;
316
317 LASSERT(osc_object_is_locked(obj));
318
319 if (!extent_debug)
320 return 0;
321
322 for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
323 if (tmp == ext)
324 continue;
325 if (tmp->oe_end >= ext->oe_start &&
326 tmp->oe_start <= ext->oe_end)
327 return 1;
328 }
329 return 0;
330 }
331
332 static void osc_extent_state_set(struct osc_extent *ext, int state)
333 {
334 LASSERT(osc_object_is_locked(ext->oe_obj));
335 LASSERT(state >= OES_INV && state < OES_STATE_MAX);
336
337 /* Never try to sanity check a state changing extent :-) */
338 /* LASSERT(sanity_check_nolock(ext) == 0); */
339
340 /* TODO: validate the state machine */
341 ext->oe_state = state;
342 wake_up_all(&ext->oe_waitq);
343 }
344
345 static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
346 {
347 struct osc_extent *ext;
348
349 ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO);
350 if (ext == NULL)
351 return NULL;
352
353 RB_CLEAR_NODE(&ext->oe_node);
354 ext->oe_obj = obj;
355 atomic_set(&ext->oe_refc, 1);
356 atomic_set(&ext->oe_users, 0);
357 INIT_LIST_HEAD(&ext->oe_link);
358 ext->oe_state = OES_INV;
359 INIT_LIST_HEAD(&ext->oe_pages);
360 init_waitqueue_head(&ext->oe_waitq);
361 ext->oe_osclock = NULL;
362
363 return ext;
364 }
365
366 static void osc_extent_free(struct osc_extent *ext)
367 {
368 kmem_cache_free(osc_extent_kmem, ext);
369 }
370
371 static struct osc_extent *osc_extent_get(struct osc_extent *ext)
372 {
373 LASSERT(atomic_read(&ext->oe_refc) >= 0);
374 atomic_inc(&ext->oe_refc);
375 return ext;
376 }
377
378 static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
379 {
380 LASSERT(atomic_read(&ext->oe_refc) > 0);
381 if (atomic_dec_and_test(&ext->oe_refc)) {
382 LASSERT(list_empty(&ext->oe_link));
383 LASSERT(atomic_read(&ext->oe_users) == 0);
384 LASSERT(ext->oe_state == OES_INV);
385 LASSERT(!ext->oe_intree);
386
387 if (ext->oe_osclock) {
388 cl_lock_put(env, ext->oe_osclock);
389 ext->oe_osclock = NULL;
390 }
391 osc_extent_free(ext);
392 }
393 }
394
395 /**
396 * osc_extent_put_trust() is a special version of osc_extent_put() for use
397 * when it's known that the caller is not the last user. This works around
398 * the lack of an lu_env at the call site ;-).
399 */
400 static void osc_extent_put_trust(struct osc_extent *ext)
401 {
402 LASSERT(atomic_read(&ext->oe_refc) > 1);
403 LASSERT(osc_object_is_locked(ext->oe_obj));
404 atomic_dec(&ext->oe_refc);
405 }
406
407 /**
408 * Return the extent which includes pgoff @index, or return the greatest
409 * previous extent in the tree.
410 */
411 static struct osc_extent *osc_extent_search(struct osc_object *obj,
412 pgoff_t index)
413 {
414 struct rb_node *n = obj->oo_root.rb_node;
415 struct osc_extent *tmp, *p = NULL;
416
417 LASSERT(osc_object_is_locked(obj));
418 while (n != NULL) {
419 tmp = rb_extent(n);
420 if (index < tmp->oe_start) {
421 n = n->rb_left;
422 } else if (index > tmp->oe_end) {
423 p = rb_extent(n);
424 n = n->rb_right;
425 } else {
426 return tmp;
427 }
428 }
429 return p;
430 }
431
432 /*
433 * Return the extent covering @index, otherwise return NULL.
434 * Caller must hold the object lock.
435 */
436 static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
437 pgoff_t index)
438 {
439 struct osc_extent *ext;
440
441 ext = osc_extent_search(obj, index);
442 if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
443 return osc_extent_get(ext);
444 return NULL;
445 }
446
447 /* caller must have held object lock. */
448 static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
449 {
450 struct rb_node **n = &obj->oo_root.rb_node;
451 struct rb_node *parent = NULL;
452 struct osc_extent *tmp;
453
454 LASSERT(ext->oe_intree == 0);
455 LASSERT(ext->oe_obj == obj);
456 LASSERT(osc_object_is_locked(obj));
457 while (*n != NULL) {
458 tmp = rb_extent(*n);
459 parent = *n;
460
461 if (ext->oe_end < tmp->oe_start)
462 n = &(*n)->rb_left;
463 else if (ext->oe_start > tmp->oe_end)
464 n = &(*n)->rb_right;
465 else
466 EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
467 }
468 rb_link_node(&ext->oe_node, parent, n);
469 rb_insert_color(&ext->oe_node, &obj->oo_root);
470 osc_extent_get(ext);
471 ext->oe_intree = 1;
472 }
473
474 /* caller must have held object lock. */
475 static void osc_extent_erase(struct osc_extent *ext)
476 {
477 struct osc_object *obj = ext->oe_obj;
478
479 LASSERT(osc_object_is_locked(obj));
480 if (ext->oe_intree) {
481 rb_erase(&ext->oe_node, &obj->oo_root);
482 ext->oe_intree = 0;
483 /* rbtree held a refcount */
484 osc_extent_put_trust(ext);
485 }
486 }
487
488 static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
489 {
490 struct osc_object *obj = ext->oe_obj;
491
492 LASSERT(osc_object_is_locked(obj));
493 LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
494 if (ext->oe_state == OES_CACHE) {
495 osc_extent_state_set(ext, OES_ACTIVE);
496 osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
497 }
498 atomic_inc(&ext->oe_users);
499 list_del_init(&ext->oe_link);
500 return osc_extent_get(ext);
501 }
502
503 static void __osc_extent_remove(struct osc_extent *ext)
504 {
505 LASSERT(osc_object_is_locked(ext->oe_obj));
506 LASSERT(list_empty(&ext->oe_pages));
507 osc_extent_erase(ext);
508 list_del_init(&ext->oe_link);
509 osc_extent_state_set(ext, OES_INV);
510 OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
511 }
512
513 static void osc_extent_remove(struct osc_extent *ext)
514 {
515 struct osc_object *obj = ext->oe_obj;
516
517 osc_object_lock(obj);
518 __osc_extent_remove(ext);
519 osc_object_unlock(obj);
520 }
521
522 /**
523 * This function is used to merge extents to get better performance. It checks
524 * if @cur and @victim are contiguous at chunk level.
525 */
526 static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
527 struct osc_extent *victim)
528 {
529 struct osc_object *obj = cur->oe_obj;
530 pgoff_t chunk_start;
531 pgoff_t chunk_end;
532 int ppc_bits;
533
534 LASSERT(cur->oe_state == OES_CACHE);
535 LASSERT(osc_object_is_locked(obj));
536 if (victim == NULL)
537 return -EINVAL;
538
539 if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
540 return -EBUSY;
541
542 if (cur->oe_max_end != victim->oe_max_end)
543 return -ERANGE;
544
545 LASSERT(cur->oe_osclock == victim->oe_osclock);
546 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
547 chunk_start = cur->oe_start >> ppc_bits;
548 chunk_end = cur->oe_end >> ppc_bits;
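/* @victim must be the chunk immediately before or after @cur, otherwise
 * the merged extent would not be contiguous at chunk level. */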
549 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
550 chunk_end + 1 != victim->oe_start >> ppc_bits)
551 return -ERANGE;
552
553 OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);
554
555 cur->oe_start = min(cur->oe_start, victim->oe_start);
556 cur->oe_end = max(cur->oe_end, victim->oe_end);
557 cur->oe_grants += victim->oe_grants;
558 cur->oe_nr_pages += victim->oe_nr_pages;
559 /* only the following bits are needed to merge */
560 cur->oe_urgent |= victim->oe_urgent;
561 cur->oe_memalloc |= victim->oe_memalloc;
562 list_splice_init(&victim->oe_pages, &cur->oe_pages);
563 list_del_init(&victim->oe_link);
564 victim->oe_nr_pages = 0;
565
566 osc_extent_get(victim);
567 __osc_extent_remove(victim);
568 osc_extent_put(env, victim);
569
570 OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
571 return 0;
572 }
573
574 /**
575 * Drop user count of osc_extent, and unplug IO asynchronously.
576 */
577 void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
578 {
579 struct osc_object *obj = ext->oe_obj;
580
581 LASSERT(atomic_read(&ext->oe_users) > 0);
582 LASSERT(sanity_check(ext) == 0);
583 LASSERT(ext->oe_grants > 0);
584
585 if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
586 LASSERT(ext->oe_state == OES_ACTIVE);
587 if (ext->oe_trunc_pending) {
588 /* a truncate process is waiting for this extent.
589 * This may happen due to a race, check
590 * osc_cache_truncate_start(). */
591 osc_extent_state_set(ext, OES_TRUNC);
592 ext->oe_trunc_pending = 0;
593 } else {
594 osc_extent_state_set(ext, OES_CACHE);
595 osc_update_pending(obj, OBD_BRW_WRITE,
596 ext->oe_nr_pages);
597
598 /* try to merge the previous and next extent. */
599 osc_extent_merge(env, ext, prev_extent(ext));
600 osc_extent_merge(env, ext, next_extent(ext));
601
602 if (ext->oe_urgent)
603 list_move_tail(&ext->oe_link,
604 &obj->oo_urgent_exts);
605 }
606 osc_object_unlock(obj);
607
608 osc_io_unplug_async(env, osc_cli(obj), obj);
609 }
610 osc_extent_put(env, ext);
611 }
612
613 static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
614 {
615 return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
616 }
617
618 /**
619 * Find or create an extent which includes @index; this is the core function
620 * for managing the extent tree.
621 */
622 struct osc_extent *osc_extent_find(const struct lu_env *env,
623 struct osc_object *obj, pgoff_t index,
624 int *grants)
625
626 {
627 struct client_obd *cli = osc_cli(obj);
628 struct cl_lock *lock;
629 struct osc_extent *cur;
630 struct osc_extent *ext;
631 struct osc_extent *conflict = NULL;
632 struct osc_extent *found = NULL;
633 pgoff_t chunk;
634 pgoff_t max_end;
635 int max_pages; /* max_pages_per_rpc */
636 int chunksize;
637 int ppc_bits; /* pages per chunk bits */
638 int chunk_mask;
639 int rc;
640
641 cur = osc_extent_alloc(obj);
642 if (cur == NULL)
643 return ERR_PTR(-ENOMEM);
644
645 lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
646 LASSERT(lock != NULL);
647 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
648
649 LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
650 ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
651 chunk_mask = ~((1 << ppc_bits) - 1);
652 chunksize = 1 << cli->cl_chunkbits;
653 chunk = index >> ppc_bits;
654
655 /* align end to the RPC boundary; the RPC size may not be a power-of-two number of pages. */
656 max_pages = cli->cl_max_pages_per_rpc;
657 LASSERT((max_pages & ~chunk_mask) == 0);
658 max_end = index - (index % max_pages) + max_pages - 1;
659 max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
660
661 /* initialize new extent by parameters so far */
662 cur->oe_max_end = max_end;
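/* [oe_start, oe_end] starts as the chunk-aligned bounds of the chunk
 * containing @index, and is then clipped to the lock extent and the RPC
 * boundary below. */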
663 cur->oe_start = index & chunk_mask;
664 cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
665 if (cur->oe_start < lock->cll_descr.cld_start)
666 cur->oe_start = lock->cll_descr.cld_start;
667 if (cur->oe_end > max_end)
668 cur->oe_end = max_end;
669 cur->oe_osclock = lock;
670 cur->oe_grants = 0;
671 cur->oe_mppr = max_pages;
672
673 /* grants have been allocated by the caller */
674 LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
675 "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
676 LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));
677
678 restart:
679 osc_object_lock(obj);
680 ext = osc_extent_search(obj, cur->oe_start);
681 if (ext == NULL)
682 ext = first_extent(obj);
683 while (ext != NULL) {
684 loff_t ext_chk_start = ext->oe_start >> ppc_bits;
685 loff_t ext_chk_end = ext->oe_end >> ppc_bits;
686
687 LASSERT(sanity_check_nolock(ext) == 0);
688 if (chunk > ext_chk_end + 1)
689 break;
690
691 /* if covering by different locks, no chance to match */
692 if (lock != ext->oe_osclock) {
693 EASSERTF(!overlapped(ext, cur), ext,
694 EXTSTR, EXTPARA(cur));
695
696 ext = next_extent(ext);
697 continue;
698 }
699
700 /* discontiguous chunks? */
701 if (chunk + 1 < ext_chk_start) {
702 ext = next_extent(ext);
703 continue;
704 }
705
706 /* ok, from now on, ext and cur have these attrs:
707 * 1. covered by the same lock
708 * 2. contiguous at chunk level or overlapping. */
709
710 if (overlapped(ext, cur)) {
711 /* cur is the minimum unit, so overlapping means
712 * full contain. */
713 EASSERTF((ext->oe_start <= cur->oe_start &&
714 ext->oe_end >= cur->oe_end),
715 ext, EXTSTR, EXTPARA(cur));
716
717 if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
718 /* for simplicity, we wait for this extent to
719 * finish before going forward. */
720 conflict = osc_extent_get(ext);
721 break;
722 }
723
724 found = osc_extent_hold(ext);
725 break;
726 }
727
728 /* non-overlapped extent */
729 if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
730 /* we can't do anything for a non OES_CACHE extent, or
731 * if there is someone waiting for this extent to be
732 * flushed, try next one. */
733 ext = next_extent(ext);
734 continue;
735 }
736
737 /* check if they belong to the same rpc slot before trying to
738 * merge. the extents are not overlapped and contiguous at
739 * chunk level to get here. */
740 if (ext->oe_max_end != max_end) {
741 /* if they don't belong to the same RPC slot or
742 * max_pages_per_rpc has ever changed, do not merge. */
743 ext = next_extent(ext);
744 continue;
745 }
746
747 /* it's required that an extent must be contiguous at chunk
748 * level so that we know the whole extent is covered by grant
749 * (the pages in the extent are NOT required to be contiguous).
750 * Otherwise, it would be too difficult to know which
751 * chunks have grants allocated. */
752
753 /* try to do front merge - extend ext's start */
754 if (chunk + 1 == ext_chk_start) {
755 /* ext must be chunk size aligned */
756 EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);
757
758 /* pull ext's start back to cover cur */
759 ext->oe_start = cur->oe_start;
760 ext->oe_grants += chunksize;
761 *grants -= chunksize;
762
763 found = osc_extent_hold(ext);
764 } else if (chunk == ext_chk_end + 1) {
765 /* rear merge */
766 ext->oe_end = cur->oe_end;
767 ext->oe_grants += chunksize;
768 *grants -= chunksize;
769
770 /* try to merge with the next one because we just fill
771 * in a gap */
772 if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
773 /* we can save extent tax from next extent */
774 *grants += cli->cl_extent_tax;
775
776 found = osc_extent_hold(ext);
777 }
778 if (found != NULL)
779 break;
780
781 ext = next_extent(ext);
782 }
783
784 osc_extent_tree_dump(D_CACHE, obj);
785 if (found != NULL) {
786 LASSERT(conflict == NULL);
787 if (!IS_ERR(found)) {
788 LASSERT(found->oe_osclock == cur->oe_osclock);
789 OSC_EXTENT_DUMP(D_CACHE, found,
790 "found caching ext for %lu.\n", index);
791 }
792 } else if (conflict == NULL) {
793 /* create a new extent */
794 EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
795 cur->oe_grants = chunksize + cli->cl_extent_tax;
796 *grants -= cur->oe_grants;
797 LASSERT(*grants >= 0);
798
799 cur->oe_state = OES_CACHE;
800 found = osc_extent_hold(cur);
801 osc_extent_insert(obj, cur);
802 OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
803 index, lock->cll_descr.cld_end);
804 }
805 osc_object_unlock(obj);
806
807 if (conflict != NULL) {
808 LASSERT(found == NULL);
809
810 /* waiting for IO to finish. Note that it's impossible for
811 * this to be an OES_TRUNC extent. */
812 rc = osc_extent_wait(env, conflict, OES_INV);
813 osc_extent_put(env, conflict);
814 conflict = NULL;
815 if (rc < 0) {
816 found = ERR_PTR(rc);
817 goto out;
818 }
819
820 goto restart;
821 }
822
823 out:
824 osc_extent_put(env, cur);
825 LASSERT(*grants >= 0);
826 return found;
827 }
828
829 /**
830 * Called when IO is finished to an extent.
831 */
832 int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
833 int sent, int rc)
834 {
835 struct client_obd *cli = osc_cli(ext->oe_obj);
836 struct osc_async_page *oap;
837 struct osc_async_page *tmp;
838 int nr_pages = ext->oe_nr_pages;
839 int lost_grant = 0;
840 int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
841 __u64 last_off = 0;
842 int last_count = -1;
843
844 OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
845
846 ext->oe_rc = rc ?: ext->oe_nr_pages;
847 EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
848 list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
849 oap_pending_item) {
850 list_del_init(&oap->oap_rpc_item);
851 list_del_init(&oap->oap_pending_item);
852 if (last_off <= oap->oap_obj_off) {
853 last_off = oap->oap_obj_off;
854 last_count = oap->oap_count;
855 }
856
857 --ext->oe_nr_pages;
858 osc_ap_completion(env, cli, oap, sent, rc);
859 }
860 EASSERT(ext->oe_nr_pages == 0, ext);
861
862 if (!sent) {
863 lost_grant = ext->oe_grants;
864 } else if (blocksize < PAGE_CACHE_SIZE &&
865 last_count != PAGE_CACHE_SIZE) {
866 /* For short writes we shouldn't count parts of pages that
867 * span a whole chunk on the OST side, or our accounting goes
868 * wrong. Should match the code in filter_grant_check. */
869 int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
870 int count = oap->oap_count + (offset & (blocksize - 1));
871 int end = (offset + oap->oap_count) & (blocksize - 1);
872
873 if (end)
874 count += blocksize - end;
875
876 lost_grant = PAGE_CACHE_SIZE - count;
877 }
878 if (ext->oe_grants > 0)
879 osc_free_grant(cli, nr_pages, lost_grant);
880
881 osc_extent_remove(ext);
882 /* put the refcount for RPC */
883 osc_extent_put(env, ext);
884 return 0;
885 }
886
887 static int extent_wait_cb(struct osc_extent *ext, int state)
888 {
889 int ret;
890
891 osc_object_lock(ext->oe_obj);
892 ret = ext->oe_state == state;
893 osc_object_unlock(ext->oe_obj);
894
895 return ret;
896 }
897
898 /**
899 * Wait for the extent's state to become @state.
900 */
901 static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
902 int state)
903 {
904 struct osc_object *obj = ext->oe_obj;
905 struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
906 LWI_ON_SIGNAL_NOOP, NULL);
907 int rc = 0;
908
909 osc_object_lock(obj);
910 LASSERT(sanity_check_nolock(ext) == 0);
911 /* `Kick' this extent only if the caller is waiting for it to be
912 * written out. */
913 if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
914 !ext->oe_trunc_pending) {
915 if (ext->oe_state == OES_ACTIVE) {
916 ext->oe_urgent = 1;
917 } else if (ext->oe_state == OES_CACHE) {
918 ext->oe_urgent = 1;
919 osc_extent_hold(ext);
920 rc = 1;
921 }
922 }
923 osc_object_unlock(obj);
924 if (rc == 1)
925 osc_extent_release(env, ext);
926
927 /* wait for the extent until its state becomes @state */
928 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
929 if (rc == -ETIMEDOUT) {
930 OSC_EXTENT_DUMP(D_ERROR, ext,
931 "%s: wait ext to %d timedout, recovery in progress?\n",
932 osc_export(obj)->exp_obd->obd_name, state);
933
934 lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
935 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
936 &lwi);
937 }
938 if (rc == 0 && ext->oe_rc < 0)
939 rc = ext->oe_rc;
940 return rc;
941 }
942
943 /**
944 * Discard pages with index greater than @trunc_index. If @ext overlaps
945 * @trunc_index, then a partial truncate happens.
946 */
947 static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
948 bool partial)
949 {
950 struct cl_env_nest nest;
951 struct lu_env *env;
952 struct cl_io *io;
953 struct osc_object *obj = ext->oe_obj;
954 struct client_obd *cli = osc_cli(obj);
955 struct osc_async_page *oap;
956 struct osc_async_page *tmp;
957 int pages_in_chunk = 0;
958 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
959 __u64 trunc_chunk = trunc_index >> ppc_bits;
960 int grants = 0;
961 int nr_pages = 0;
962 int rc = 0;
963
964 LASSERT(sanity_check(ext) == 0);
965 EASSERT(ext->oe_state == OES_TRUNC, ext);
966 EASSERT(!ext->oe_urgent, ext);
967
968 /* Request a new lu_env.
969 * We can't use the env from osc_cache_truncate_start() because
970 * it's from lov_io_sub and not fully initialized. */
971 env = cl_env_nested_get(&nest);
972 io = &osc_env_info(env)->oti_io;
973 io->ci_obj = cl_object_top(osc2cl(obj));
974 rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
975 if (rc < 0)
976 goto out;
977
978 /* discard all pages with index greater than trunc_index */
979 list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
980 oap_pending_item) {
981 struct cl_page *sub = oap2cl_page(oap);
982 struct cl_page *page = cl_page_top(sub);
983
984 LASSERT(list_empty(&oap->oap_rpc_item));
985
986 /* only discard the pages with their index greater than
987 * trunc_index, and ... */
988 if (sub->cp_index < trunc_index ||
989 (sub->cp_index == trunc_index && partial)) {
990 /* count how many pages remain in the chunk
991 * so that we can calculate grants correctly. */
992 if (sub->cp_index >> ppc_bits == trunc_chunk)
993 ++pages_in_chunk;
994 continue;
995 }
996
997 list_del_init(&oap->oap_pending_item);
998
999 cl_page_get(page);
1000 lu_ref_add(&page->cp_reference, "truncate", current);
1001
1002 if (cl_page_own(env, io, page) == 0) {
1003 cl_page_unmap(env, io, page);
1004 cl_page_discard(env, io, page);
1005 cl_page_disown(env, io, page);
1006 } else {
1007 LASSERT(page->cp_state == CPS_FREEING);
1008 LASSERT(0);
1009 }
1010
1011 lu_ref_del(&page->cp_reference, "truncate", current);
1012 cl_page_put(env, page);
1013
1014 --ext->oe_nr_pages;
1015 ++nr_pages;
1016 }
1017 EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
1018 ext->oe_nr_pages == 0),
1019 ext, "trunc_index %lu, partial %d\n", trunc_index, partial);
1020
1021 osc_object_lock(obj);
1022 if (ext->oe_nr_pages == 0) {
1023 LASSERT(pages_in_chunk == 0);
1024 grants = ext->oe_grants;
1025 ext->oe_grants = 0;
1026 } else { /* calculate how many grants we can free */
1027 int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
1028 pgoff_t last_index;
1029
1030 /* if there are no pages in this chunk, we can also free grants
1031 * for the last chunk */
1032 if (pages_in_chunk == 0) {
1033 /* if this is the 1st chunk and there are no pages in it,
1034 * ext->oe_nr_pages must be zero, so we should be in
1035 * the other if-clause. */
1036 LASSERT(trunc_chunk > 0);
1037 --trunc_chunk;
1038 ++chunks;
1039 }
1040
1041 /* this is what we can free from this extent */
1042 grants = chunks << cli->cl_chunkbits;
1043 ext->oe_grants -= grants;
1044 last_index = ((trunc_chunk + 1) << ppc_bits) - 1;
1045 ext->oe_end = min(last_index, ext->oe_max_end);
1046 LASSERT(ext->oe_end >= ext->oe_start);
1047 LASSERT(ext->oe_grants > 0);
1048 }
1049 osc_object_unlock(obj);
1050
1051 if (grants > 0 || nr_pages > 0)
1052 osc_free_grant(cli, nr_pages, grants);
1053
1054 out:
1055 cl_io_fini(env, io);
1056 cl_env_nested_put(&nest, env);
1057 return rc;
1058 }
1059
1060 /**
1061 * This function is used to make the extent prepared for transfer.
1062 * A race with flushing page - ll_writepage() has to be handled cautiously.
1063 */
1064 static int osc_extent_make_ready(const struct lu_env *env,
1065 struct osc_extent *ext)
1066 {
1067 struct osc_async_page *oap;
1068 struct osc_async_page *last = NULL;
1069 struct osc_object *obj = ext->oe_obj;
1070 int page_count = 0;
1071 int rc;
1072
1073 /* we're going to grab page lock, so object lock must not be taken. */
1074 LASSERT(sanity_check(ext) == 0);
1075 /* in locking state, any process should not touch this extent. */
1076 EASSERT(ext->oe_state == OES_LOCKING, ext);
1077 EASSERT(ext->oe_owner != NULL, ext);
1078
1079 OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
1080
1081 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1082 ++page_count;
1083 if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
1084 last = oap;
1085
1086 /* checking ASYNC_READY is race safe */
1087 if ((oap->oap_async_flags & ASYNC_READY) != 0)
1088 continue;
1089
1090 rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
1091 switch (rc) {
1092 case 0:
1093 spin_lock(&oap->oap_lock);
1094 oap->oap_async_flags |= ASYNC_READY;
1095 spin_unlock(&oap->oap_lock);
1096 break;
1097 case -EALREADY:
1098 LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
1099 break;
1100 default:
1101 LASSERTF(0, "unknown return code: %d\n", rc);
1102 }
1103 }
1104
1105 LASSERT(page_count == ext->oe_nr_pages);
1106 LASSERT(last != NULL);
1107 /* the last page is the only one whose count needs to be
1108 * refreshed against the size of the file. */
1109 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
1110 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
1111 LASSERT(last->oap_count > 0);
1112 LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
1113 last->oap_async_flags |= ASYNC_COUNT_STABLE;
1114 }
1115
1116 /* for the rest of the pages, we don't need to call osc_refresh_count()
1117 * because it's known they are not the last page */
1118 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1119 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
1120 oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
1121 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1122 }
1123 }
1124
1125 osc_object_lock(obj);
1126 osc_extent_state_set(ext, OES_RPC);
1127 osc_object_unlock(obj);
1128 /* get a refcount for RPC. */
1129 osc_extent_get(ext);
1130
1131 return 0;
1132 }
1133
1134 /**
1135 * Quick and simple version of osc_extent_find(). This function is frequently
1136 * called to expand the extent for the same IO. To expand the extent, the
1137 * page index must be in the same or next chunk of ext->oe_end.
1138 */
1139 static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
1140 {
1141 struct osc_object *obj = ext->oe_obj;
1142 struct client_obd *cli = osc_cli(obj);
1143 struct osc_extent *next;
1144 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
1145 pgoff_t chunk = index >> ppc_bits;
1146 pgoff_t end_chunk;
1147 pgoff_t end_index;
1148 int chunksize = 1 << cli->cl_chunkbits;
1149 int rc = 0;
1150
1151 LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
1152 osc_object_lock(obj);
1153 LASSERT(sanity_check_nolock(ext) == 0);
1154 end_chunk = ext->oe_end >> ppc_bits;
1155 if (chunk > end_chunk + 1) {
1156 rc = -ERANGE;
1157 goto out;
1158 }
1159
1160 if (end_chunk >= chunk) {
1161 rc = 0;
1162 goto out;
1163 }
1164
1165 LASSERT(end_chunk + 1 == chunk);
1166 /* try to expand this extent to cover @index */
1167 end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
1168
1169 next = next_extent(ext);
1170 if (next != NULL && next->oe_start <= end_index) {
1171 /* complex mode - overlapped with the next extent,
1172 * this case will be handled by osc_extent_find() */
1173 rc = -EAGAIN;
1174 goto out;
1175 }
1176
1177 ext->oe_end = end_index;
1178 ext->oe_grants += chunksize;
1179 *grants -= chunksize;
1180 LASSERT(*grants >= 0);
1181 EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
1182 "overlapped after expanding for %lu.\n", index);
1183
1184 out:
1185 osc_object_unlock(obj);
1186 return rc;
1187 }
1188
1189 static void osc_extent_tree_dump0(int level, struct osc_object *obj,
1190 const char *func, int line)
1191 {
1192 struct osc_extent *ext;
1193 int cnt;
1194
1195 CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
1196 obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);
1197
1198 /* osc_object_lock(obj); */
1199 cnt = 1;
1200 for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
1201 OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
1202
1203 cnt = 1;
1204 list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
1205 OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);
1206
1207 cnt = 1;
1208 list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
1209 OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);
1210
1211 cnt = 1;
1212 list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
1213 OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
1214 /* osc_object_unlock(obj); */
1215 }
1216
1217 /* ------------------ osc extent end ------------------ */
1218
1219 static inline int osc_is_ready(struct osc_object *osc)
1220 {
1221 return !list_empty(&osc->oo_ready_item) ||
1222 !list_empty(&osc->oo_hp_ready_item);
1223 }
1224
1225 #define OSC_IO_DEBUG(OSC, STR, args...) \
1226 CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR, \
1227 (OSC), osc_is_ready(OSC), \
1228 list_empty_marker(&(OSC)->oo_hp_ready_item), \
1229 list_empty_marker(&(OSC)->oo_ready_item), \
1230 atomic_read(&(OSC)->oo_nr_writes), \
1231 list_empty_marker(&(OSC)->oo_hp_exts), \
1232 list_empty_marker(&(OSC)->oo_urgent_exts), \
1233 atomic_read(&(OSC)->oo_nr_reads), \
1234 list_empty_marker(&(OSC)->oo_reading_exts), \
1235 ##args)
1236
1237 static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
1238 int cmd)
1239 {
1240 struct osc_page *opg = oap2osc_page(oap);
1241 struct cl_page *page = cl_page_top(oap2cl_page(oap));
1242 int result;
1243
1244 LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
1245
1246 result = cl_page_make_ready(env, page, CRT_WRITE);
1247 if (result == 0)
1248 opg->ops_submit_time = cfs_time_current();
1249 return result;
1250 }
1251
1252 static int osc_refresh_count(const struct lu_env *env,
1253 struct osc_async_page *oap, int cmd)
1254 {
1255 struct osc_page *opg = oap2osc_page(oap);
1256 struct cl_page *page = oap2cl_page(oap);
1257 struct cl_object *obj;
1258 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1259
1260 int result;
1261 loff_t kms;
1262
1263 /* readpage queues with _COUNT_STABLE, shouldn't get here. */
1264 LASSERT(!(cmd & OBD_BRW_READ));
1265 LASSERT(opg != NULL);
1266 obj = opg->ops_cl.cpl_obj;
1267
1268 cl_object_attr_lock(obj);
1269 result = cl_object_attr_get(env, obj, attr);
1270 cl_object_attr_unlock(obj);
1271 if (result < 0)
1272 return result;
1273 kms = attr->cat_kms;
1274 if (cl_offset(obj, page->cp_index) >= kms)
1275 /* catch race with truncate */
1276 return 0;
1277 else if (cl_offset(obj, page->cp_index + 1) > kms)
1278 /* catch sub-page write at end of file */
1279 return kms % PAGE_CACHE_SIZE;
1280 else
1281 return PAGE_CACHE_SIZE;
1282 }
1283
1284 static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
1285 int cmd, int rc)
1286 {
1287 struct osc_page *opg = oap2osc_page(oap);
1288 struct cl_page *page = cl_page_top(oap2cl_page(oap));
1289 struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
1290 enum cl_req_type crt;
1291 int srvlock;
1292
1293 cmd &= ~OBD_BRW_NOQUOTA;
1294 LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
1295 LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
1296 LASSERT(opg->ops_transfer_pinned);
1297
1298 /*
1299 * page->cp_req can be NULL if io submission failed before
1300 * cl_req was allocated.
1301 */
1302 if (page->cp_req != NULL)
1303 cl_req_page_done(env, page);
1304 LASSERT(page->cp_req == NULL);
1305
1306 crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
1307 /* Clear opg->ops_transfer_pinned before VM lock is released. */
1308 opg->ops_transfer_pinned = 0;
1309
1310 spin_lock(&obj->oo_seatbelt);
1311 LASSERT(opg->ops_submitter != NULL);
1312 LASSERT(!list_empty(&opg->ops_inflight));
1313 list_del_init(&opg->ops_inflight);
1314 opg->ops_submitter = NULL;
1315 spin_unlock(&obj->oo_seatbelt);
1316
1317 opg->ops_submit_time = 0;
1318 srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
1319
1320 /* statistic */
1321 if (rc == 0 && srvlock) {
1322 struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
1323 struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
1324 int bytes = oap->oap_count;
1325
1326 if (crt == CRT_READ)
1327 stats->os_lockless_reads += bytes;
1328 else
1329 stats->os_lockless_writes += bytes;
1330 }
1331
1332 /*
1333 * This has to be the last operation with the page, as locks are
1334 * released in cl_page_completion() and nothing except for the
1335 * reference counter protects page from concurrent reclaim.
1336 */
1337 lu_ref_del(&page->cp_reference, "transfer", page);
1338
1339 cl_page_completion(env, page, crt, rc);
1340
1341 return 0;
1342 }
1343
1344 #define OSC_DUMP_GRANT(cli, fmt, args...) do { \
1345 struct client_obd *__tmp = (cli); \
1346 CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \
1347 "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
1348 __tmp->cl_import->imp_obd->obd_name, \
1349 __tmp->cl_dirty, __tmp->cl_dirty_max, \
1350 atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
1351 __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
1352 __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
1353 } while (0)
1354
1355 /* caller must hold loi_list_lock */
1356 static void osc_consume_write_grant(struct client_obd *cli,
1357 struct brw_page *pga)
1358 {
1359 assert_spin_locked(&cli->cl_loi_list_lock.lock);
1360 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
1361 atomic_inc(&obd_dirty_pages);
1362 cli->cl_dirty += PAGE_CACHE_SIZE;
1363 pga->flag |= OBD_BRW_FROM_GRANT;
1364 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
1365 PAGE_CACHE_SIZE, pga, pga->pg);
1366 osc_update_next_shrink(cli);
1367 }
1368
1369 /* the companion to osc_consume_write_grant, called when a brw has completed.
1370 * must be called with the loi lock held. */
1371 static void osc_release_write_grant(struct client_obd *cli,
1372 struct brw_page *pga)
1373 {
1374 assert_spin_locked(&cli->cl_loi_list_lock.lock);
1375 if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
1376 return;
1377 }
1378
1379 pga->flag &= ~OBD_BRW_FROM_GRANT;
1380 atomic_dec(&obd_dirty_pages);
1381 cli->cl_dirty -= PAGE_CACHE_SIZE;
1382 if (pga->flag & OBD_BRW_NOCACHE) {
1383 pga->flag &= ~OBD_BRW_NOCACHE;
1384 atomic_dec(&obd_dirty_transit_pages);
1385 cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
1386 }
1387 }
1388
1389 /**
1390 * To avoid sleeping with the object lock held, it's good for us to allocate
1391 * enough grant before entering the critical section.
1392 *
1393 * client_obd_list_lock held by caller
1394 */
1395 static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
1396 {
1397 int rc = -EDQUOT;
1398
1399 if (cli->cl_avail_grant >= bytes) {
1400 cli->cl_avail_grant -= bytes;
1401 cli->cl_reserved_grant += bytes;
1402 rc = 0;
1403 }
1404 return rc;
1405 }
1406
1407 static void __osc_unreserve_grant(struct client_obd *cli,
1408 unsigned int reserved, unsigned int unused)
1409 {
1410 /* it's quite normal for us to get more grant than reserved.
1411 * Consider the case where two extents are merged by adding a new
1412 * chunk: we can save one extent tax. If the extent tax is greater than
1413 * one chunk, we can save more grant by adding a new chunk */
1414 cli->cl_reserved_grant -= reserved;
1415 if (unused > reserved) {
1416 cli->cl_avail_grant += reserved;
1417 cli->cl_lost_grant += unused - reserved;
1418 } else {
1419 cli->cl_avail_grant += unused;
1420 }
1421 }
1422
1423 void osc_unreserve_grant(struct client_obd *cli,
1424 unsigned int reserved, unsigned int unused)
1425 {
1426 client_obd_list_lock(&cli->cl_loi_list_lock);
1427 __osc_unreserve_grant(cli, reserved, unused);
1428 if (unused > 0)
1429 osc_wake_cache_waiters(cli);
1430 client_obd_list_unlock(&cli->cl_loi_list_lock);
1431 }
1432
1433 /**
1434 * Free grant after IO is finished or canceled.
1435 *
1436 * @lost_grant is used to remember how many grants we have allocated but not
1437 * used; we should return these grants to the OST. There are two cases where
1438 * grants can be lost:
1439 * 1. truncate;
1440 * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
1441 * written. In this case the OST may use fewer chunks to serve this partial
1442 * write. OSTs don't actually know the page size on the client side, so
1443 * clients have to calculate the lost grant using the blocksize on the OST.
1444 * See filter_grant_check() for details.
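 *
 *    For example (assuming 4096-byte client pages and a 1024-byte OST block
 *    size), a 100-byte write only consumes one 1024-byte block on the OST,
 *    so the remaining 3072 bytes of that page's grant are "lost" and have
 *    to be returned, as computed in osc_extent_finish().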
1445 */
1446 static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1447 unsigned int lost_grant)
1448 {
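/* grant needed to cache one more chunk: a full chunk of data plus the
 * per-extent tax */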
1449 int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
1450
1451 client_obd_list_lock(&cli->cl_loi_list_lock);
1452 atomic_sub(nr_pages, &obd_dirty_pages);
1453 cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
1454 cli->cl_lost_grant += lost_grant;
1455 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
1456 /* borrow some grant from truncate to avoid the case that
1457 * truncate uses up all avail grant */
1458 cli->cl_lost_grant -= grant;
1459 cli->cl_avail_grant += grant;
1460 }
1461 osc_wake_cache_waiters(cli);
1462 client_obd_list_unlock(&cli->cl_loi_list_lock);
1463 CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
1464 lost_grant, cli->cl_lost_grant,
1465 cli->cl_avail_grant, cli->cl_dirty);
1466 }
1467
1468 /**
1469 * The companion to osc_enter_cache(), called when @oap is no longer part of
1470 * the dirty accounting due to error.
1471 */
1472 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
1473 {
1474 client_obd_list_lock(&cli->cl_loi_list_lock);
1475 osc_release_write_grant(cli, &oap->oap_brw_page);
1476 client_obd_list_unlock(&cli->cl_loi_list_lock);
1477 }
1478
1479 /**
1480 * Non-blocking version of osc_enter_cache() that consumes grant only when it
1481 * is available.
1482 */
1483 static int osc_enter_cache_try(struct client_obd *cli,
1484 struct osc_async_page *oap,
1485 int bytes, int transient)
1486 {
1487 int rc;
1488
1489 OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
1490
1491 rc = osc_reserve_grant(cli, bytes);
1492 if (rc < 0)
1493 return 0;
1494
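/* check both the per-client dirty limit and the global dirty page limit
 * before actually consuming the write grant */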
1495 if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
1496 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
1497 osc_consume_write_grant(cli, &oap->oap_brw_page);
1498 if (transient) {
1499 cli->cl_dirty_transit += PAGE_CACHE_SIZE;
1500 atomic_inc(&obd_dirty_transit_pages);
1501 oap->oap_brw_flags |= OBD_BRW_NOCACHE;
1502 }
1503 rc = 1;
1504 } else {
1505 __osc_unreserve_grant(cli, bytes, bytes);
1506 rc = 0;
1507 }
1508 return rc;
1509 }
1510
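/* A waiter is granted (or refused) by osc_wake_cache_waiters(), which also
 * removes it from cl_cache_waiters; an empty ocw_entry therefore means this
 * waiter has been processed. */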
1511 static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
1512 {
1513 int rc;
1514
1515 client_obd_list_lock(&cli->cl_loi_list_lock);
1516 rc = list_empty(&ocw->ocw_entry);
1517 client_obd_list_unlock(&cli->cl_loi_list_lock);
1518 return rc;
1519 }
1520
1521 /**
1522 * The main entry to reserve dirty page accounting. Usually the grant reserved
1523 * in this function will be freed in bulk in osc_free_grant() unless it fails
1524 * to add the page to the osc cache; in that case, it will be freed in osc_exit_cache().
1525 *
1526 * The process will be put to sleep if it has already run out of grant.
1527 */
1528 static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1529 struct osc_async_page *oap, int bytes)
1530 {
1531 struct osc_object *osc = oap->oap_obj;
1532 struct lov_oinfo *loi = osc->oo_oinfo;
1533 struct osc_cache_waiter ocw;
1534 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
1535 int rc = -EDQUOT;
1536
1537 OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
1538
1539 client_obd_list_lock(&cli->cl_loi_list_lock);
1540
1541 /* force the caller to try sync io. this can jump the list
1542 * of queued writes and create a discontiguous rpc stream */
1543 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
1544 cli->cl_dirty_max < PAGE_CACHE_SIZE ||
1545 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
1546 rc = -EDQUOT;
1547 goto out;
1548 }
1549
1550 /* Hopefully normal case - cache space and write credits available */
1551 if (osc_enter_cache_try(cli, oap, bytes, 0)) {
1552 rc = 0;
1553 goto out;
1554 }
1555
1556 /* We can get here for two reasons: too many dirty pages in cache, or
1557 * we have run out of grant. In both cases we should write dirty pages out.
1558 * Adding a cache waiter will trigger urgent write-out no matter what
1559 * RPC size will be.
1560 * The exit condition is no available grant and no dirty pages cached,
1561 * which really means there is no space on the OST. */
1562 init_waitqueue_head(&ocw.ocw_waitq);
1563 ocw.ocw_oap = oap;
1564 ocw.ocw_grant = bytes;
1565 while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
1566 list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
1567 ocw.ocw_rc = 0;
1568 client_obd_list_unlock(&cli->cl_loi_list_lock);
1569
1570 osc_io_unplug_async(env, cli, NULL);
1571
1572 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
1573 cli->cl_import->imp_obd->obd_name, &ocw, oap);
1574
1575 rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
1576
1577 client_obd_list_lock(&cli->cl_loi_list_lock);
1578
1579 /* l_wait_event is interrupted by signal */
1580 if (rc < 0) {
1581 list_del_init(&ocw.ocw_entry);
1582 goto out;
1583 }
1584
1585 LASSERT(list_empty(&ocw.ocw_entry));
1586 rc = ocw.ocw_rc;
1587
1588 if (rc != -EDQUOT)
1589 goto out;
1590 if (osc_enter_cache_try(cli, oap, bytes, 0)) {
1591 rc = 0;
1592 goto out;
1593 }
1594 }
1595 out:
1596 client_obd_list_unlock(&cli->cl_loi_list_lock);
1597 OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
1598 return rc;
1599 }
1600
1601 /* caller must hold loi_list_lock */
1602 void osc_wake_cache_waiters(struct client_obd *cli)
1603 {
1604 struct list_head *l, *tmp;
1605 struct osc_cache_waiter *ocw;
1606
1607 list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
1608 ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
1609 list_del_init(&ocw->ocw_entry);
1610
1611 ocw->ocw_rc = -EDQUOT;
1612 /* we can't dirty more */
1613 if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
1614 (atomic_read(&obd_dirty_pages) + 1 >
1615 obd_max_dirty_pages)) {
1616 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
1617 cli->cl_dirty,
1618 cli->cl_dirty_max, obd_max_dirty_pages);
1619 goto wakeup;
1620 }
1621
1622 ocw->ocw_rc = 0;
1623 if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
1624 ocw->ocw_rc = -EDQUOT;
1625
1626 wakeup:
1627 CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
1628 ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
1629
1630 wake_up(&ocw->ocw_waitq);
1631 }
1632 }
1633
1634 static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
1635 {
1636 int hprpc = !!list_empty(&osc->oo_hp_exts);
1637
1638 return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
1639 }
1640
1641 /* This maintains the lists of pending pages to read/write for a given object
1642 * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
1643 * to quickly find objects that are ready to send an RPC. */
1644 static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
1645 int cmd)
1646 {
1647 int invalid_import = 0;
1648
1649 /* if we have an invalid import we want to drain the queued pages
1650 * by forcing them through rpcs that immediately fail and complete
1651 * the pages. recovery relies on this to empty the queued pages
1652 * before canceling the locks and evicting down the llite pages */
1653 if ((cli->cl_import == NULL || cli->cl_import->imp_invalid))
1654 invalid_import = 1;
1655
1656 if (cmd & OBD_BRW_WRITE) {
1657 if (atomic_read(&osc->oo_nr_writes) == 0)
1658 return 0;
1659 if (invalid_import) {
1660 CDEBUG(D_CACHE, "invalid import forcing RPC\n");
1661 return 1;
1662 }
1663 if (!list_empty(&osc->oo_hp_exts)) {
1664 CDEBUG(D_CACHE, "high prio request forcing RPC\n");
1665 return 1;
1666 }
1667 if (!list_empty(&osc->oo_urgent_exts)) {
1668 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
1669 return 1;
1670 }
1671 /* trigger a write rpc stream as long as there are dirtiers
1672 * waiting for space. as they're waiting, they're not going to
1673 * create more pages to coalesce with what's waiting. */
1674 if (!list_empty(&cli->cl_cache_waiters)) {
1675 CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
1676 return 1;
1677 }
1678 if (atomic_read(&osc->oo_nr_writes) >=
1679 cli->cl_max_pages_per_rpc)
1680 return 1;
1681 } else {
1682 if (atomic_read(&osc->oo_nr_reads) == 0)
1683 return 0;
1684 if (invalid_import) {
1685 CDEBUG(D_CACHE, "invalid import forcing RPC\n");
1686 return 1;
1687 }
1688 /* all read are urgent. */
1689 if (!list_empty(&osc->oo_reading_exts))
1690 return 1;
1691 }
1692
1693 return 0;
1694 }
1695
1696 static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
1697 {
1698 struct client_obd *cli = osc_cli(obj);
1699
1700 if (cmd & OBD_BRW_WRITE) {
1701 atomic_add(delta, &obj->oo_nr_writes);
1702 atomic_add(delta, &cli->cl_pending_w_pages);
1703 LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
1704 } else {
1705 atomic_add(delta, &obj->oo_nr_reads);
1706 atomic_add(delta, &cli->cl_pending_r_pages);
1707 LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
1708 }
1709 OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
1710 }
1711
1712 static int osc_makes_hprpc(struct osc_object *obj)
1713 {
1714 return !list_empty(&obj->oo_hp_exts);
1715 }
1716
1717 static void on_list(struct list_head *item, struct list_head *list, int should_be_on)
1718 {
1719 if (list_empty(item) && should_be_on)
1720 list_add_tail(item, list);
1721 else if (!list_empty(item) && !should_be_on)
1722 list_del_init(item);
1723 }
1724
1725 /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
1726 * can find pages to build into rpcs quickly */
1727 static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
1728 {
1729 if (osc_makes_hprpc(osc)) {
1730 /* HP rpc */
1731 on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
1732 on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
1733 } else {
1734 on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
1735 on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
1736 osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
1737 osc_makes_rpc(cli, osc, OBD_BRW_READ));
1738 }
1739
1740 on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
1741 atomic_read(&osc->oo_nr_writes) > 0);
1742
1743 on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
1744 atomic_read(&osc->oo_nr_reads) > 0);
1745
1746 return osc_is_ready(osc);
1747 }
1748
1749 static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
1750 {
1751 int is_ready;
1752
1753 client_obd_list_lock(&cli->cl_loi_list_lock);
1754 is_ready = __osc_list_maint(cli, osc);
1755 client_obd_list_unlock(&cli->cl_loi_list_lock);
1756
1757 return is_ready;
1758 }
1759
1760 /* this is trying to propagate async writeback errors back up to the
1761 * application. As an async write fails we record the error code for later if
1762 * the app does an fsync. As long as errors persist we force future rpcs to be
1763 * sync so that the app can get a sync error and break the cycle of queueing
1764 * pages for which writeback will fail. */
1765 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
1766 int rc)
1767 {
1768 if (rc) {
1769 if (!ar->ar_rc)
1770 ar->ar_rc = rc;
1771
1772 ar->ar_force_sync = 1;
1773 ar->ar_min_xid = ptlrpc_sample_next_xid();
1774 return;
1775
1776 }
1777
1778 if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
1779 ar->ar_force_sync = 0;
1780 }
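/*
 * Illustrative sketch (hypothetical xids, not part of the driver): a failed
 * write forces later RPCs to be synchronous until a write whose xid is at or
 * beyond ar_min_xid completes cleanly:
 *
 *	osc_process_ar(ar, 100, -EIO);	// record rc, set ar_force_sync,
 *					// ar_min_xid = next xid (say 101)
 *	osc_process_ar(ar, 100, 0);	// xid < ar_min_xid: stay synchronous
 *	osc_process_ar(ar, 101, 0);	// xid >= ar_min_xid: clear force_sync
 */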
1781
1782 /* this must be called holding the loi list lock to give coverage to exit_cache,
1783 * async_flag maintenance, and oap_request */
1784 static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
1785 struct osc_async_page *oap, int sent, int rc)
1786 {
1787 struct osc_object *osc = oap->oap_obj;
1788 struct lov_oinfo *loi = osc->oo_oinfo;
1789 __u64 xid = 0;
1790
1791 if (oap->oap_request != NULL) {
1792 xid = ptlrpc_req_xid(oap->oap_request);
1793 ptlrpc_req_finished(oap->oap_request);
1794 oap->oap_request = NULL;
1795 }
1796
1797 /* As the transfer for this page is being done, clear the flags */
1798 spin_lock(&oap->oap_lock);
1799 oap->oap_async_flags = 0;
1800 spin_unlock(&oap->oap_lock);
1801 oap->oap_interrupted = 0;
1802
1803 if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
1804 client_obd_list_lock(&cli->cl_loi_list_lock);
1805 osc_process_ar(&cli->cl_ar, xid, rc);
1806 osc_process_ar(&loi->loi_ar, xid, rc);
1807 client_obd_list_unlock(&cli->cl_loi_list_lock);
1808 }
1809
1810 rc = osc_completion(env, oap, oap->oap_cmd, rc);
1811 if (rc)
1812 CERROR("completion on oap %p obj %p returns %d.\n",
1813 oap, osc, rc);
1814 }
1815
1816 /**
1817 * Try to add extent to one RPC. We need to think about the following things:
1818 * - # of pages must not be over max_pages_per_rpc
1819 * - extent must be compatible with previous ones
1820 */
1821 static int try_to_add_extent_for_io(struct client_obd *cli,
1822 struct osc_extent *ext, struct list_head *rpclist,
1823 int *pc, unsigned int *max_pages)
1824 {
1825 struct osc_extent *tmp;
1826 struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
1827 struct osc_async_page,
1828 oap_pending_item);
1829
1830 EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
1831 ext);
1832
1833 *max_pages = max(ext->oe_mppr, *max_pages);
1834 if (*pc + ext->oe_nr_pages > *max_pages)
1835 return 0;
1836
1837 list_for_each_entry(tmp, rpclist, oe_link) {
1838 struct osc_async_page *oap2;
1839
1840 oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
1841 oap_pending_item);
1842 EASSERT(tmp->oe_owner == current, tmp);
1843 if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
1844 CDEBUG(D_CACHE, "Do not permit different types of IO"
1845 " in the same RPC\n");
1846 return 0;
1847 }
1848
1849 if (tmp->oe_srvlock != ext->oe_srvlock ||
1850 !tmp->oe_grants != !ext->oe_grants)
1851 return 0;
1852
1853 /* remove break for strict check */
1854 break;
1855 }
1856
1857 *pc += ext->oe_nr_pages;
1858 list_move_tail(&ext->oe_link, rpclist);
1859 ext->oe_owner = current;
1860 return 1;
1861 }
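/*
 * Summary comment added for clarity (not part of the original source): an
 * extent is accepted into the RPC list only if the accumulated page count
 * stays within *max_pages and it is compatible with the extents already
 * picked: same cl_page type, same oe_srvlock setting, and both or neither
 * carrying grants.
 */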
1862
1863 /**
1864 * In order to prevent multiple ptlrpcd threads from breaking contiguous extents,
1865 * get_write_extents() takes all appropriate extents atomically.
1866 *
1867 * The following policy is used to collect extents for IO:
1868 * 1. Add as many HP extents as possible;
1869 * 2. Add the first urgent extent in urgent extent list and take it out of
1870 * urgent list;
1871 * 3. Add subsequent extents of this urgent extent;
1872 * 4. If urgent list is not empty, goto 2;
1873 * 5. Traverse the extent tree from the 1st extent;
1874 * 6. Above steps exit if there is no space in this RPC.
1875 */
1876 static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
1877 {
1878 struct client_obd *cli = osc_cli(obj);
1879 struct osc_extent *ext;
1880 int page_count = 0;
1881 unsigned int max_pages = cli->cl_max_pages_per_rpc;
1882
1883 LASSERT(osc_object_is_locked(obj));
1884 while (!list_empty(&obj->oo_hp_exts)) {
1885 ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
1886 oe_link);
1887 LASSERT(ext->oe_state == OES_CACHE);
1888 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
1889 &max_pages))
1890 return page_count;
1891 EASSERT(ext->oe_nr_pages <= max_pages, ext);
1892 }
1893 if (page_count == max_pages)
1894 return page_count;
1895
1896 while (!list_empty(&obj->oo_urgent_exts)) {
1897 ext = list_entry(obj->oo_urgent_exts.next,
1898 struct osc_extent, oe_link);
1899 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
1900 &max_pages))
1901 return page_count;
1902
1903 if (!ext->oe_intree)
1904 continue;
1905
1906 while ((ext = next_extent(ext)) != NULL) {
1907 if ((ext->oe_state != OES_CACHE) ||
1908 (!list_empty(&ext->oe_link) &&
1909 ext->oe_owner != NULL))
1910 continue;
1911
1912 if (!try_to_add_extent_for_io(cli, ext, rpclist,
1913 &page_count, &max_pages))
1914 return page_count;
1915 }
1916 }
1917 if (page_count == max_pages)
1918 return page_count;
1919
1920 ext = first_extent(obj);
1921 while (ext != NULL) {
1922 if ((ext->oe_state != OES_CACHE) ||
1923 /* this extent may be already in current rpclist */
1924 (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
1925 ext = next_extent(ext);
1926 continue;
1927 }
1928
1929 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
1930 &max_pages))
1931 return page_count;
1932
1933 ext = next_extent(ext);
1934 }
1935 return page_count;
1936 }
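/*
 * Illustrative sketch (hypothetical extents, not from the source): with
 * oo_hp_exts = {E1}, oo_urgent_exts = {E3} and the tree holding E2..E5, the
 * policy above fills the RPC with E1 first, then E3 followed by its tree
 * neighbours E4 and E5, and finally E2 from the full tree scan, stopping as
 * soon as max_pages is reached.
 */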
1937
1938 static int
1939 osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
1940 struct osc_object *osc)
1941 {
1942 LIST_HEAD(rpclist);
1943 struct osc_extent *ext;
1944 struct osc_extent *tmp;
1945 struct osc_extent *first = NULL;
1946 u32 page_count = 0;
1947 int srvlock = 0;
1948 int rc = 0;
1949
1950 LASSERT(osc_object_is_locked(osc));
1951
1952 page_count = get_write_extents(osc, &rpclist);
1953 LASSERT(equi(page_count == 0, list_empty(&rpclist)));
1954
1955 if (list_empty(&rpclist))
1956 return 0;
1957
1958 osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
1959
1960 list_for_each_entry(ext, &rpclist, oe_link) {
1961 LASSERT(ext->oe_state == OES_CACHE ||
1962 ext->oe_state == OES_LOCK_DONE);
1963 if (ext->oe_state == OES_CACHE)
1964 osc_extent_state_set(ext, OES_LOCKING);
1965 else
1966 osc_extent_state_set(ext, OES_RPC);
1967 }
1968
1969 /* we're going to grab page lock, so release object lock because
1970 * lock order is page lock -> object lock. */
1971 osc_object_unlock(osc);
1972
1973 list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
1974 if (ext->oe_state == OES_LOCKING) {
1975 rc = osc_extent_make_ready(env, ext);
1976 if (unlikely(rc < 0)) {
1977 list_del_init(&ext->oe_link);
1978 osc_extent_finish(env, ext, 0, rc);
1979 continue;
1980 }
1981 }
1982 if (first == NULL) {
1983 first = ext;
1984 srvlock = ext->oe_srvlock;
1985 } else {
1986 LASSERT(srvlock == ext->oe_srvlock);
1987 }
1988 }
1989
1990 if (!list_empty(&rpclist)) {
1991 LASSERT(page_count > 0);
1992 rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
1993 LASSERT(list_empty(&rpclist));
1994 }
1995
1996 osc_object_lock(osc);
1997 return rc;
1998 }
1999
2000 /**
2001 * prepare pages for ASYNC io and put pages in send queue.
2002 *
2003 * \param cmd OBD_BRW_* macros
2004 * \param lop pending pages
2005 *
2006 * \return zero if no page added to send queue.
2007 * \return 1 if pages successfully added to send queue.
2008 * \return negative on errors.
2009 */
2010 static int
2011 osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
2012 struct osc_object *osc)
2013 {
2014 struct osc_extent *ext;
2015 struct osc_extent *next;
2016 LIST_HEAD(rpclist);
2017 int page_count = 0;
2018 unsigned int max_pages = cli->cl_max_pages_per_rpc;
2019 int rc = 0;
2020
2021 LASSERT(osc_object_is_locked(osc));
2022 list_for_each_entry_safe(ext, next,
2023 &osc->oo_reading_exts, oe_link) {
2024 EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
2025 if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
2026 &max_pages))
2027 break;
2028 osc_extent_state_set(ext, OES_RPC);
2029 EASSERT(ext->oe_nr_pages <= max_pages, ext);
2030 }
2031 LASSERT(page_count <= max_pages);
2032
2033 osc_update_pending(osc, OBD_BRW_READ, -page_count);
2034
2035 if (!list_empty(&rpclist)) {
2036 osc_object_unlock(osc);
2037
2038 LASSERT(page_count > 0);
2039 rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
2040 LASSERT(list_empty(&rpclist));
2041
2042 osc_object_lock(osc);
2043 }
2044 return rc;
2045 }
2046
2047 #define list_to_obj(list, item) ({ \
2048 struct list_head *__tmp = (list)->next; \
2049 list_del_init(__tmp); \
2050 list_entry(__tmp, struct osc_object, oo_##item); \
2051 })
2052
2053 /* This is called by osc_check_rpcs() to find which objects have pages that
2054 * we could be sending. These lists are maintained by osc_makes_rpc(). */
2055 static struct osc_object *osc_next_obj(struct client_obd *cli)
2056 {
2057 /* First return objects that have blocked locks so that they
2058 * will be flushed quickly and other clients can get the lock,
2059 * then objects which have pages ready to be stuffed into RPCs */
2060 if (!list_empty(&cli->cl_loi_hp_ready_list))
2061 return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
2062 if (!list_empty(&cli->cl_loi_ready_list))
2063 return list_to_obj(&cli->cl_loi_ready_list, ready_item);
2064
2065 /* then if we have cache waiters, return all objects with queued
2066 * writes. This is especially important when many small files
2067 * have filled up the cache and not been fired into rpcs because
2068 * they don't pass the nr_pending/object threshold */
2069 if (!list_empty(&cli->cl_cache_waiters) &&
2070 !list_empty(&cli->cl_loi_write_list))
2071 return list_to_obj(&cli->cl_loi_write_list, write_item);
2072
2073 /* then return all queued objects when we have an invalid import
2074 * so that they get flushed */
2075 if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2076 if (!list_empty(&cli->cl_loi_write_list))
2077 return list_to_obj(&cli->cl_loi_write_list, write_item);
2078 if (!list_empty(&cli->cl_loi_read_list))
2079 return list_to_obj(&cli->cl_loi_read_list, read_item);
2080 }
2081 return NULL;
2082 }
2083
2084 /* called with the loi list lock held */
2085 static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2086 {
2087 struct osc_object *osc;
2088 int rc = 0;
2089
2090 while ((osc = osc_next_obj(cli)) != NULL) {
2091 struct cl_object *obj = osc2cl(osc);
2092 struct lu_ref_link link;
2093
2094 OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
2095
2096 if (osc_max_rpc_in_flight(cli, osc)) {
2097 __osc_list_maint(cli, osc);
2098 break;
2099 }
2100
2101 cl_object_get(obj);
2102 client_obd_list_unlock(&cli->cl_loi_list_lock);
2103 lu_object_ref_add_at(&obj->co_lu, &link, "check",
2104 current);
2105
2106 /* attempt some read/write balancing by alternating between
2107 * reads and writes in an object. The makes_rpc checks here
2108 * would be redundant if we were getting read/write work items
2109 * instead of objects. We don't want send_oap_rpc to drain a
2110 * partial read pending queue when we're given this object to
2111 * do write io on while there are cache waiters */
2112 osc_object_lock(osc);
2113 if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
2114 rc = osc_send_write_rpc(env, cli, osc);
2115 if (rc < 0) {
2116 CERROR("Write request failed with %d\n", rc);
2117
2118 /* osc_send_write_rpc failed, mostly because of
2119 * memory pressure.
2120 *
2121 * We can't break out here, because if:
2122 * - a page was submitted by osc_io_submit, so
2123 * it is locked;
2124 * - there is no request in flight; and
2125 * - no subsequent request will be made,
2126 * then the system ends up live-locked:
2127 * there is no further chance to call
2128 * osc_io_unplug() and osc_check_rpcs(),
2129 * and pdflush can't help in this case,
2130 * because it might be blocked grabbing
2131 * the page lock as mentioned above.
2132 *
2133 * Anyway, continue to drain pages. */
2134 /* break; */
2135 }
2136 }
2137 if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
2138 rc = osc_send_read_rpc(env, cli, osc);
2139 if (rc < 0)
2140 CERROR("Read request failed with %d\n", rc);
2141 }
2142 osc_object_unlock(osc);
2143
2144 osc_list_maint(cli, osc);
2145 lu_object_ref_del_at(&obj->co_lu, &link, "check",
2146 current);
2147 cl_object_put(env, obj);
2148
2149 client_obd_list_lock(&cli->cl_loi_list_lock);
2150 }
2151 }
2152
2153 static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
2154 struct osc_object *osc, int async)
2155 {
2156 int rc = 0;
2157
2158 if (osc != NULL && osc_list_maint(cli, osc) == 0)
2159 return 0;
2160
2161 if (!async) {
2162 /* disable osc_lru_shrink() temporarily to avoid
2163 * potential stack overrun problem. LU-2859 */
2164 atomic_inc(&cli->cl_lru_shrinkers);
2165 client_obd_list_lock(&cli->cl_loi_list_lock);
2166 osc_check_rpcs(env, cli);
2167 client_obd_list_unlock(&cli->cl_loi_list_lock);
2168 atomic_dec(&cli->cl_lru_shrinkers);
2169 } else {
2170 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
2171 LASSERT(cli->cl_writeback_work != NULL);
2172 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
2173 }
2174 return rc;
2175 }
2176
2177 static int osc_io_unplug_async(const struct lu_env *env,
2178 struct client_obd *cli, struct osc_object *osc)
2179 {
2180 return osc_io_unplug0(env, cli, osc, 1);
2181 }
2182
2183 void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
2184 struct osc_object *osc)
2185 {
2186 (void)osc_io_unplug0(env, cli, osc, 0);
2187 }
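/*
 * Usage note added for clarity (not from the original source): the async
 * variant only queues cl_writeback_work for ptlrpcd, so it is safe in
 * contexts that cannot afford the stack depth or locking of a synchronous
 * flush; osc_io_unplug() drives osc_check_rpcs() directly in the caller's
 * context:
 *
 *	osc_io_unplug_async(env, cli, osc);	// defer the flush to ptlrpcd
 *	osc_io_unplug(env, cli, osc);		// flush right here
 */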
2188
2189 int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
2190 struct page *page, loff_t offset)
2191 {
2192 struct obd_export *exp = osc_export(osc);
2193 struct osc_async_page *oap = &ops->ops_oap;
2194
2195 if (!page)
2196 return cfs_size_round(sizeof(*oap));
2197
2198 oap->oap_magic = OAP_MAGIC;
2199 oap->oap_cli = &exp->exp_obd->u.cli;
2200 oap->oap_obj = osc;
2201
2202 oap->oap_page = page;
2203 oap->oap_obj_off = offset;
2204 LASSERT(!(offset & ~CFS_PAGE_MASK));
2205
2206 if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
2207 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2208
2209 INIT_LIST_HEAD(&oap->oap_pending_item);
2210 INIT_LIST_HEAD(&oap->oap_rpc_item);
2211
2212 spin_lock_init(&oap->oap_lock);
2213 CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
2214 oap, page, oap->oap_obj_off);
2215 return 0;
2216 }
2217
2218 int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
2219 struct osc_page *ops)
2220 {
2221 struct osc_io *oio = osc_env_io(env);
2222 struct osc_extent *ext = NULL;
2223 struct osc_async_page *oap = &ops->ops_oap;
2224 struct client_obd *cli = oap->oap_cli;
2225 struct osc_object *osc = oap->oap_obj;
2226 pgoff_t index;
2227 int grants = 0;
2228 int brw_flags = OBD_BRW_ASYNC;
2229 int cmd = OBD_BRW_WRITE;
2230 int need_release = 0;
2231 int rc = 0;
2232
2233 if (oap->oap_magic != OAP_MAGIC)
2234 return -EINVAL;
2235
2236 if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2237 return -EIO;
2238
2239 if (!list_empty(&oap->oap_pending_item) ||
2240 !list_empty(&oap->oap_rpc_item))
2241 return -EBUSY;
2242
2243 /* Set the OBD_BRW_SRVLOCK before the page is queued. */
2244 brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
2245 if (!client_is_remote(osc_export(osc)) &&
2246 capable(CFS_CAP_SYS_RESOURCE)) {
2247 brw_flags |= OBD_BRW_NOQUOTA;
2248 cmd |= OBD_BRW_NOQUOTA;
2249 }
2250
2251 /* check if the file's owner/group is over quota */
2252 if (!(cmd & OBD_BRW_NOQUOTA)) {
2253 struct cl_object *obj;
2254 struct cl_attr *attr;
2255 unsigned int qid[MAXQUOTAS];
2256
2257 obj = cl_object_top(&osc->oo_cl);
2258 attr = &osc_env_info(env)->oti_attr;
2259
2260 cl_object_attr_lock(obj);
2261 rc = cl_object_attr_get(env, obj, attr);
2262 cl_object_attr_unlock(obj);
2263
2264 qid[USRQUOTA] = attr->cat_uid;
2265 qid[GRPQUOTA] = attr->cat_gid;
2266 if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
2267 rc = -EDQUOT;
2268 if (rc)
2269 return rc;
2270 }
2271
2272 oap->oap_cmd = cmd;
2273 oap->oap_page_off = ops->ops_from;
2274 oap->oap_count = ops->ops_to - ops->ops_from;
2275 oap->oap_async_flags = 0;
2276 oap->oap_brw_flags = brw_flags;
2277
2278 OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
2279 oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
2280
2281 index = oap2cl_page(oap)->cp_index;
2282
2283 /* Add this page into an extent by the following steps:
2284 * 1. if there exists an active extent for this IO, this page can usually
2285 * be added to that extent, though sometimes we need to expand the
2286 * extent to accommodate this page;
2287 * 2. otherwise, a new extent will be allocated. */
2288
2289 ext = oio->oi_active;
2290 if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
2291 /* one chunk plus extent overhead must be enough to write this
2292 * page */
2293 grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
2294 if (ext->oe_end >= index)
2295 grants = 0;
2296
2297 /* it doesn't need any grant to dirty this page */
2298 client_obd_list_lock(&cli->cl_loi_list_lock);
2299 rc = osc_enter_cache_try(cli, oap, grants, 0);
2300 client_obd_list_unlock(&cli->cl_loi_list_lock);
2301 if (rc == 0) { /* try failed */
2302 grants = 0;
2303 need_release = 1;
2304 } else if (ext->oe_end < index) {
2305 int tmp = grants;
2306 /* try to expand this extent */
2307 rc = osc_extent_expand(ext, index, &tmp);
2308 if (rc < 0) {
2309 need_release = 1;
2310 /* don't free reserved grant */
2311 } else {
2312 OSC_EXTENT_DUMP(D_CACHE, ext,
2313 "expanded for %lu.\n", index);
2314 osc_unreserve_grant(cli, grants, tmp);
2315 grants = 0;
2316 }
2317 }
2318 rc = 0;
2319 } else if (ext != NULL) {
2320 /* index is located outside of active extent */
2321 need_release = 1;
2322 }
2323 if (need_release) {
2324 osc_extent_release(env, ext);
2325 oio->oi_active = NULL;
2326 ext = NULL;
2327 }
2328
2329 if (ext == NULL) {
2330 int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
2331
2332 /* try to find new extent to cover this page */
2333 LASSERT(oio->oi_active == NULL);
2334 /* we may have allocated grant for this page if we failed
2335 * to expand the previous active extent. */
2336 LASSERT(ergo(grants > 0, grants >= tmp));
2337
2338 rc = 0;
2339 if (grants == 0) {
2340 /* we haven't allocated grant for this page. */
2341 rc = osc_enter_cache(env, cli, oap, tmp);
2342 if (rc == 0)
2343 grants = tmp;
2344 }
2345
2346 tmp = grants;
2347 if (rc == 0) {
2348 ext = osc_extent_find(env, osc, index, &tmp);
2349 if (IS_ERR(ext)) {
2350 LASSERT(tmp == grants);
2351 osc_exit_cache(cli, oap);
2352 rc = PTR_ERR(ext);
2353 ext = NULL;
2354 } else {
2355 oio->oi_active = ext;
2356 }
2357 }
2358 if (grants > 0)
2359 osc_unreserve_grant(cli, grants, tmp);
2360 }
2361
2362 LASSERT(ergo(rc == 0, ext != NULL));
2363 if (ext != NULL) {
2364 EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
2365 ext, "index = %lu.\n", index);
2366 LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
2367
2368 osc_object_lock(osc);
2369 if (ext->oe_nr_pages == 0)
2370 ext->oe_srvlock = ops->ops_srvlock;
2371 else
2372 LASSERT(ext->oe_srvlock == ops->ops_srvlock);
2373 ++ext->oe_nr_pages;
2374 list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
2375 osc_object_unlock(osc);
2376 }
2377 return rc;
2378 }
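/*
 * Summary comment added for clarity (not from the original source): when the
 * page falls inside the active extent no new grant is needed; when it lies
 * just beyond oe_end, one chunk plus cl_extent_tax is reserved and the extent
 * is expanded; otherwise the active extent is released and a new one is set
 * up via osc_enter_cache() and osc_extent_find().
 */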
2379
2380 int osc_teardown_async_page(const struct lu_env *env,
2381 struct osc_object *obj, struct osc_page *ops)
2382 {
2383 struct osc_async_page *oap = &ops->ops_oap;
2384 struct osc_extent *ext = NULL;
2385 int rc = 0;
2386
2387 LASSERT(oap->oap_magic == OAP_MAGIC);
2388
2389 CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
2390 oap, ops, oap2cl_page(oap)->cp_index);
2391
2392 osc_object_lock(obj);
2393 if (!list_empty(&oap->oap_rpc_item)) {
2394 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
2395 rc = -EBUSY;
2396 } else if (!list_empty(&oap->oap_pending_item)) {
2397 ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
2398 /* only truncated pages are allowed to be taken out.
2399 * See osc_extent_truncate() and osc_cache_truncate_start()
2400 * for details. */
2401 if (ext != NULL && ext->oe_state != OES_TRUNC) {
2402 OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
2403 oap2cl_page(oap)->cp_index);
2404 rc = -EBUSY;
2405 }
2406 }
2407 osc_object_unlock(obj);
2408 if (ext != NULL)
2409 osc_extent_put(env, ext);
2410 return rc;
2411 }
2412
2413 /**
2414 * This is called when a page is picked up by kernel to write out.
2415 *
2416 * We should find out the corresponding extent and add the whole extent
2417 * into urgent list. The extent may be being truncated or used, handle it
2418 * carefully.
2419 */
2420 int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2421 struct osc_page *ops)
2422 {
2423 struct osc_extent *ext = NULL;
2424 struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
2425 struct cl_page *cp = ops->ops_cl.cpl_page;
2426 pgoff_t index = cp->cp_index;
2427 struct osc_async_page *oap = &ops->ops_oap;
2428 bool unplug = false;
2429 int rc = 0;
2430
2431 osc_object_lock(obj);
2432 ext = osc_extent_lookup(obj, index);
2433 if (ext == NULL) {
2434 osc_extent_tree_dump(D_ERROR, obj);
2435 LASSERTF(0, "page index %lu is NOT covered.\n", index);
2436 }
2437
2438 switch (ext->oe_state) {
2439 case OES_RPC:
2440 case OES_LOCK_DONE:
2441 CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
2442 "flush an in-rpc page?\n");
2443 LASSERT(0);
2444 break;
2445 case OES_LOCKING:
2446 /* If we know this extent is being written out, we should abort
2447 * so that the writer can make this page ready. Otherwise there
2448 * is a deadlock: another process can wait for the page writeback
2449 * bit while holding the page lock, and meanwhile in
2450 * vvp_page_make_ready() we need to grab the page lock before
2451 * really sending the RPC. */
2452 case OES_TRUNC:
2453 /* race with truncate, page will be redirtied */
2454 case OES_ACTIVE:
2455 /* The extent is active so we need to abort and let the caller
2456 * re-dirty the page. If we continued on here, and we were the
2457 * one making the extent active, we could deadlock waiting for
2458 * the page writeback to clear but it won't because the extent
2459 * is active and won't be written out. */
2460 rc = -EAGAIN;
2461 goto out;
2462 default:
2463 break;
2464 }
2465
2466 rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
2467 if (rc)
2468 goto out;
2469
2470 spin_lock(&oap->oap_lock);
2471 oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
2472 spin_unlock(&oap->oap_lock);
2473
2474 if (memory_pressure_get())
2475 ext->oe_memalloc = 1;
2476
2477 ext->oe_urgent = 1;
2478 if (ext->oe_state == OES_CACHE) {
2479 OSC_EXTENT_DUMP(D_CACHE, ext,
2480 "flush page %p make it urgent.\n", oap);
2481 if (list_empty(&ext->oe_link))
2482 list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2483 unplug = true;
2484 }
2485 rc = 0;
2486
2487 out:
2488 osc_object_unlock(obj);
2489 osc_extent_put(env, ext);
2490 if (unplug)
2491 osc_io_unplug_async(env, osc_cli(obj), obj);
2492 return rc;
2493 }
2494
2495 /**
2496 * this is called when a sync waiter receives an interruption. Its job is to
2497 * get the caller woken as soon as possible. If its page hasn't been put in an
2498 * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
2499 * desiring interruption which will forcefully complete the rpc once the rpc
2500 * has timed out.
2501 */
2502 int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
2503 {
2504 struct osc_async_page *oap = &ops->ops_oap;
2505 struct osc_object *obj = oap->oap_obj;
2506 struct client_obd *cli = osc_cli(obj);
2507 struct osc_extent *ext;
2508 struct osc_extent *found = NULL;
2509 struct list_head *plist;
2510 pgoff_t index = oap2cl_page(oap)->cp_index;
2511 int rc = -EBUSY;
2512 int cmd;
2513
2514 LASSERT(!oap->oap_interrupted);
2515 oap->oap_interrupted = 1;
2516
2517 /* Find out the caching extent */
2518 osc_object_lock(obj);
2519 if (oap->oap_cmd & OBD_BRW_WRITE) {
2520 plist = &obj->oo_urgent_exts;
2521 cmd = OBD_BRW_WRITE;
2522 } else {
2523 plist = &obj->oo_reading_exts;
2524 cmd = OBD_BRW_READ;
2525 }
2526 list_for_each_entry(ext, plist, oe_link) {
2527 if (ext->oe_start <= index && ext->oe_end >= index) {
2528 LASSERT(ext->oe_state == OES_LOCK_DONE);
2529 /* For OES_LOCK_DONE state extent, it has already held
2530 * a refcount for RPC. */
2531 found = osc_extent_get(ext);
2532 break;
2533 }
2534 }
2535 if (found != NULL) {
2536 list_del_init(&found->oe_link);
2537 osc_update_pending(obj, cmd, -found->oe_nr_pages);
2538 osc_object_unlock(obj);
2539
2540 osc_extent_finish(env, found, 0, -EINTR);
2541 osc_extent_put(env, found);
2542 rc = 0;
2543 } else {
2544 osc_object_unlock(obj);
2545 /* ok, it's been put in an rpc. only one oap gets a request
2546 * reference */
2547 if (oap->oap_request != NULL) {
2548 ptlrpc_mark_interrupted(oap->oap_request);
2549 ptlrpcd_wake(oap->oap_request);
2550 ptlrpc_req_finished(oap->oap_request);
2551 oap->oap_request = NULL;
2552 }
2553 }
2554
2555 osc_list_maint(cli, obj);
2556 return rc;
2557 }
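/*
 * Summary comment added for clarity (not from the original source): if the
 * interrupted page's extent is still queued (OES_LOCK_DONE on the urgent or
 * reading list) it is removed and finished with -EINTR right away; if it has
 * already been put into an RPC, the request is only marked interrupted and
 * ptlrpcd is woken so the RPC is forced to complete when it times out.
 */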
2558
2559 int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2560 struct list_head *list, int cmd, int brw_flags)
2561 {
2562 struct client_obd *cli = osc_cli(obj);
2563 struct osc_extent *ext;
2564 struct osc_async_page *oap, *tmp;
2565 int page_count = 0;
2566 int mppr = cli->cl_max_pages_per_rpc;
2567 pgoff_t start = CL_PAGE_EOF;
2568 pgoff_t end = 0;
2569
2570 list_for_each_entry(oap, list, oap_pending_item) {
2571 struct cl_page *cp = oap2cl_page(oap);
2572
2573 if (cp->cp_index > end)
2574 end = cp->cp_index;
2575 if (cp->cp_index < start)
2576 start = cp->cp_index;
2577 ++page_count;
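/* double mppr whenever page_count outgrows it, so oe_mppr stays a
 * power-of-two multiple of cl_max_pages_per_rpc covering all pages */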
2578 mppr <<= (page_count > mppr);
2579 }
2580
2581 ext = osc_extent_alloc(obj);
2582 if (ext == NULL) {
2583 list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
2584 list_del_init(&oap->oap_pending_item);
2585 osc_ap_completion(env, cli, oap, 0, -ENOMEM);
2586 }
2587 return -ENOMEM;
2588 }
2589
2590 ext->oe_rw = !!(cmd & OBD_BRW_READ);
2591 ext->oe_urgent = 1;
2592 ext->oe_start = start;
2593 ext->oe_end = ext->oe_max_end = end;
2594 ext->oe_obj = obj;
2595 ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
2596 ext->oe_nr_pages = page_count;
2597 ext->oe_mppr = mppr;
2598 list_splice_init(list, &ext->oe_pages);
2599
2600 osc_object_lock(obj);
2601 /* Reuse the initial refcount for RPC, don't drop it */
2602 osc_extent_state_set(ext, OES_LOCK_DONE);
2603 if (cmd & OBD_BRW_WRITE) {
2604 list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2605 osc_update_pending(obj, OBD_BRW_WRITE, page_count);
2606 } else {
2607 list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
2608 osc_update_pending(obj, OBD_BRW_READ, page_count);
2609 }
2610 osc_object_unlock(obj);
2611
2612 osc_io_unplug_async(env, cli, obj);
2613 return 0;
2614 }
2615
2616 /**
2617 * Called by osc_io_setattr_start() to freeze and destroy covering extents.
2618 */
2619 int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
2620 struct osc_object *obj, __u64 size)
2621 {
2622 struct client_obd *cli = osc_cli(obj);
2623 struct osc_extent *ext;
2624 struct osc_extent *waiting = NULL;
2625 pgoff_t index;
2626 LIST_HEAD(list);
2627 int result = 0;
2628 bool partial;
2629
2630 /* pages with index greater or equal to index will be truncated. */
2631 index = cl_index(osc2cl(obj), size);
2632 partial = size > cl_offset(osc2cl(obj), index);
2633
2634 again:
2635 osc_object_lock(obj);
2636 ext = osc_extent_search(obj, index);
2637 if (ext == NULL)
2638 ext = first_extent(obj);
2639 else if (ext->oe_end < index)
2640 ext = next_extent(ext);
2641 while (ext != NULL) {
2642 EASSERT(ext->oe_state != OES_TRUNC, ext);
2643
2644 if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
2645 /* if ext is in urgent state, it means there must exist
2646 * a page already having been flushed by write_page().
2647 * We have to wait for this extent because we can't
2648 * truncate that page. */
2649 LASSERT(!ext->oe_hp);
2650 OSC_EXTENT_DUMP(D_CACHE, ext,
2651 "waiting for busy extent\n");
2652 waiting = osc_extent_get(ext);
2653 break;
2654 }
2655
2656 OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
2657
2658 osc_extent_get(ext);
2659 if (ext->oe_state == OES_ACTIVE) {
2660 /* though we grab the inode mutex in the write path, we
2661 * release it before releasing the extent (in osc_io_end()),
2662 * so there is a race window in which an extent is still
2663 * in OES_ACTIVE when truncate starts. */
2664 LASSERT(!ext->oe_trunc_pending);
2665 ext->oe_trunc_pending = 1;
2666 } else {
2667 EASSERT(ext->oe_state == OES_CACHE, ext);
2668 osc_extent_state_set(ext, OES_TRUNC);
2669 osc_update_pending(obj, OBD_BRW_WRITE,
2670 -ext->oe_nr_pages);
2671 }
2672 EASSERT(list_empty(&ext->oe_link), ext);
2673 list_add_tail(&ext->oe_link, &list);
2674
2675 ext = next_extent(ext);
2676 }
2677 osc_object_unlock(obj);
2678
2679 osc_list_maint(cli, obj);
2680
2681 while (!list_empty(&list)) {
2682 int rc;
2683
2684 ext = list_entry(list.next, struct osc_extent, oe_link);
2685 list_del_init(&ext->oe_link);
2686
2687 /* extent may be in OES_ACTIVE state because inode mutex
2688 * is released before osc_io_end() in file write case */
2689 if (ext->oe_state != OES_TRUNC)
2690 osc_extent_wait(env, ext, OES_TRUNC);
2691
2692 rc = osc_extent_truncate(ext, index, partial);
2693 if (rc < 0) {
2694 if (result == 0)
2695 result = rc;
2696
2697 OSC_EXTENT_DUMP(D_ERROR, ext,
2698 "truncate error %d\n", rc);
2699 } else if (ext->oe_nr_pages == 0) {
2700 osc_extent_remove(ext);
2701 } else {
2702 /* this must be an overlapped extent which means only
2703 * part of pages in this extent have been truncated.
2704 */
2705 EASSERTF(ext->oe_start <= index, ext,
2706 "trunc index = %lu/%d.\n", index, partial);
2707 /* fix index to skip this partially truncated extent */
2708 index = ext->oe_end + 1;
2709 partial = false;
2710
2711 /* we need to hold this extent in OES_TRUNC state so
2712 * that no writeback will happen. This is to avoid
2713 * BUG 17397. */
2714 LASSERT(oio->oi_trunc == NULL);
2715 oio->oi_trunc = osc_extent_get(ext);
2716 OSC_EXTENT_DUMP(D_CACHE, ext,
2717 "trunc at %llu\n", size);
2718 }
2719 osc_extent_put(env, ext);
2720 }
2721 if (waiting != NULL) {
2722 int rc;
2723
2724 /* ignore the result of osc_extent_wait; the write initiator
2725 * should take care of it. */
2726 rc = osc_extent_wait(env, waiting, OES_INV);
2727 if (rc < 0)
2728 OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
2729
2730 osc_extent_put(env, waiting);
2731 waiting = NULL;
2732 goto again;
2733 }
2734 return result;
2735 }
2736
2737 /**
2738 * Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
2739 */
2740 void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
2741 struct osc_object *obj)
2742 {
2743 struct osc_extent *ext = oio->oi_trunc;
2744
2745 oio->oi_trunc = NULL;
2746 if (ext != NULL) {
2747 bool unplug = false;
2748
2749 EASSERT(ext->oe_nr_pages > 0, ext);
2750 EASSERT(ext->oe_state == OES_TRUNC, ext);
2751 EASSERT(!ext->oe_urgent, ext);
2752
2753 OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
2754 osc_object_lock(obj);
2755 osc_extent_state_set(ext, OES_CACHE);
2756 if (ext->oe_fsync_wait && !ext->oe_urgent) {
2757 ext->oe_urgent = 1;
2758 list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
2759 unplug = true;
2760 }
2761 osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
2762 osc_object_unlock(obj);
2763 osc_extent_put(env, ext);
2764
2765 if (unplug)
2766 osc_io_unplug_async(env, osc_cli(obj), obj);
2767 }
2768 }
2769
2770 /**
2771 * Wait for extents in a specific range to be written out.
2772 * The caller must have called osc_cache_writeback_range() to issue IO
2773 * otherwise it will take a long time for this function to finish.
2774 *
2775 * Caller must hold inode_mutex, or cancel the exclusive dlm lock, so that
2776 * nobody else can dirty this range of the file while we're waiting for
2777 * extents to be written.
2778 */
2779 int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
2780 pgoff_t start, pgoff_t end)
2781 {
2782 struct osc_extent *ext;
2783 pgoff_t index = start;
2784 int result = 0;
2785
2786 again:
2787 osc_object_lock(obj);
2788 ext = osc_extent_search(obj, index);
2789 if (ext == NULL)
2790 ext = first_extent(obj);
2791 else if (ext->oe_end < index)
2792 ext = next_extent(ext);
2793 while (ext != NULL) {
2794 int rc;
2795
2796 if (ext->oe_start > end)
2797 break;
2798
2799 if (!ext->oe_fsync_wait) {
2800 ext = next_extent(ext);
2801 continue;
2802 }
2803
2804 EASSERT(ergo(ext->oe_state == OES_CACHE,
2805 ext->oe_hp || ext->oe_urgent), ext);
2806 EASSERT(ergo(ext->oe_state == OES_ACTIVE,
2807 !ext->oe_hp && ext->oe_urgent), ext);
2808
2809 index = ext->oe_end + 1;
2810 osc_extent_get(ext);
2811 osc_object_unlock(obj);
2812
2813 rc = osc_extent_wait(env, ext, OES_INV);
2814 if (result == 0)
2815 result = rc;
2816 osc_extent_put(env, ext);
2817 goto again;
2818 }
2819 osc_object_unlock(obj);
2820
2821 OSC_IO_DEBUG(obj, "sync file range.\n");
2822 return result;
2823 }
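/*
 * Illustrative usage sketch (hypothetical caller, not from the source): a
 * range fsync first issues the IO and then drains it:
 *
 *	rc = osc_cache_writeback_range(env, obj, start, end, 0, 0);
 *	if (rc >= 0)
 *		rc = osc_cache_wait_range(env, obj, start, end);
 */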
2824
2825 /**
2826 * Called to write out a range of osc object.
2827 *
2828 * @hp : should be set if this is caused by lock cancel;
2829 * @discard: is set if dirty pages should be dropped - file will be deleted or
2830 * truncated; this implies that extents are never partially discarded.
2831 *
2832 * Return how many pages will be issued, or error code if error occurred.
2833 */
2834 int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
2835 pgoff_t start, pgoff_t end, int hp, int discard)
2836 {
2837 struct osc_extent *ext;
2838 LIST_HEAD(discard_list);
2839 bool unplug = false;
2840 int result = 0;
2841
2842 osc_object_lock(obj);
2843 ext = osc_extent_search(obj, start);
2844 if (ext == NULL)
2845 ext = first_extent(obj);
2846 else if (ext->oe_end < start)
2847 ext = next_extent(ext);
2848 while (ext != NULL) {
2849 if (ext->oe_start > end)
2850 break;
2851
2852 ext->oe_fsync_wait = 1;
2853 switch (ext->oe_state) {
2854 case OES_CACHE:
2855 result += ext->oe_nr_pages;
2856 if (!discard) {
2857 struct list_head *list = NULL;
2858
2859 if (hp) {
2860 EASSERT(!ext->oe_hp, ext);
2861 ext->oe_hp = 1;
2862 list = &obj->oo_hp_exts;
2863 } else if (!ext->oe_urgent) {
2864 ext->oe_urgent = 1;
2865 list = &obj->oo_urgent_exts;
2866 }
2867 if (list != NULL)
2868 list_move_tail(&ext->oe_link, list);
2869 unplug = true;
2870 } else {
2871 /* the only discarder is lock cancelling, so
2872 * [start, end] must contain this extent */
2873 EASSERT(ext->oe_start >= start &&
2874 ext->oe_max_end <= end, ext);
2875 osc_extent_state_set(ext, OES_LOCKING);
2876 ext->oe_owner = current;
2877 list_move_tail(&ext->oe_link,
2878 &discard_list);
2879 osc_update_pending(obj, OBD_BRW_WRITE,
2880 -ext->oe_nr_pages);
2881 }
2882 break;
2883 case OES_ACTIVE:
2884 /* It's pretty bad to wait for ACTIVE extents, because
2885 * we don't know how long we would have to wait for them
2886 * to be flushed, since they may be blocked waiting for
2887 * more grants. We do this for the correctness of fsync. */
2888 LASSERT(hp == 0 && discard == 0);
2889 ext->oe_urgent = 1;
2890 break;
2891 case OES_TRUNC:
2892 /* this extent is being truncated, can't do anything
2893 * for it now. it will be set to urgent after truncate
2894 * is finished in osc_cache_truncate_end(). */
2895 default:
2896 break;
2897 }
2898 ext = next_extent(ext);
2899 }
2900 osc_object_unlock(obj);
2901
2902 LASSERT(ergo(!discard, list_empty(&discard_list)));
2903 if (!list_empty(&discard_list)) {
2904 struct osc_extent *tmp;
2905 int rc;
2906
2907 osc_list_maint(osc_cli(obj), obj);
2908 list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
2909 list_del_init(&ext->oe_link);
2910 EASSERT(ext->oe_state == OES_LOCKING, ext);
2911
2912 /* Discard caching pages. We don't actually write this
2913 * extent out but we complete it as if we did. */
2914 rc = osc_extent_make_ready(env, ext);
2915 if (unlikely(rc < 0)) {
2916 OSC_EXTENT_DUMP(D_ERROR, ext,
2917 "make_ready returned %d\n", rc);
2918 if (result >= 0)
2919 result = rc;
2920 }
2921
2922 /* finish the extent as if the pages were sent */
2923 osc_extent_finish(env, ext, 0, 0);
2924 }
2925 }
2926
2927 if (unplug)
2928 osc_io_unplug(env, osc_cli(obj), obj);
2929
2930 if (hp || discard) {
2931 int rc;
2932
2933 rc = osc_cache_wait_range(env, obj, start, end);
2934 if (result >= 0 && rc < 0)
2935 result = rc;
2936 }
2937
2938 OSC_IO_DEBUG(obj, "cache page out.\n");
2939 return result;
2940 }
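/*
 * Illustrative usage sketch (hypothetical callers, not from the source):
 *
 *	osc_cache_writeback_range(env, obj, start, end, 1, 0);
 *		- lock cancel: flush with high priority, keep the data
 *	osc_cache_writeback_range(env, obj, start, end, 0, 1);
 *		- file delete/truncate under the lock: drop dirty pages
 *
 * In both cases the function waits for the affected extents through
 * osc_cache_wait_range() before returning.
 */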
2941
2942 /** @} osc */
2943