/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd_support.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "llite_internal.h"

#define SA_OMITTED_ENTRY_MAX 8ULL

typedef enum {
	/** negative values are for error cases */
	SA_ENTRY_INIT = 0,	/** init entry */
	SA_ENTRY_SUCC = 1,	/** stat succeeded */
	SA_ENTRY_INVA = 2,	/** invalid entry */
	SA_ENTRY_DEST = 3,	/** entry to be destroyed */
} se_stat_t;

struct ll_sa_entry {
	/* link into sai->sai_entries */
	struct list_head	se_link;
	/* link into sai->sai_entries_{received,stated} */
	struct list_head	se_list;
	/* link into sai hash table locally */
	struct list_head	se_hash;
	/* entry reference count */
	atomic_t		se_refcount;
	/* entry index in the sai */
	__u64			se_index;
	/* low layer ldlm lock handle */
	__u64			se_handle;
	/* entry status */
	se_stat_t		se_stat;
	/* entry size, contains name */
	int			se_size;
	/* pointer to async getattr enqueue info */
	struct md_enqueue_info *se_minfo;
	/* pointer to the async getattr request */
	struct ptlrpc_request  *se_req;
	/* pointer to the target inode */
	struct inode	       *se_inode;
	/* entry name */
	struct qstr		se_qstr;
};

static unsigned int sai_generation;
static DEFINE_SPINLOCK(sai_generation_lock);

static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
{
	return list_empty(&entry->se_hash);
}

/*
 * The entry can only be released by the caller; it is necessary to hold the
 * lock.
 */
static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
{
	smp_rmb();
	return (entry->se_stat != SA_ENTRY_INIT);
}

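/* Map a name hash value to a bucket index in the sai statahead cache. */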
static inline int ll_sa_entry_hash(int val)
{
	return val & LL_SA_CACHE_MASK;
}

/*
 * Insert the entry into the SA hash table.
 */
static inline void
ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	int i = ll_sa_entry_hash(entry->se_qstr.hash);

	spin_lock(&sai->sai_cache_lock[i]);
	list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
	spin_unlock(&sai->sai_cache_lock[i]);
}

/*
 * Remove the entry from the SA hash table.
 */
static inline void
ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	int i = ll_sa_entry_hash(entry->se_qstr.hash);

	spin_lock(&sai->sai_cache_lock[i]);
	list_del_init(&entry->se_hash);
	spin_unlock(&sai->sai_cache_lock[i]);
}

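/* AGL (async glimpse lock) only applies to regular files, and only while
 * sai_agl_valid is set, i.e. the AGL thread is still running. */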
static inline int agl_should_run(struct ll_statahead_info *sai,
				 struct inode *inode)
{
	return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
}

static inline struct ll_sa_entry *
sa_first_received_entry(struct ll_statahead_info *sai)
{
	return list_entry(sai->sai_entries_received.next,
			  struct ll_sa_entry, se_list);
}

static inline struct ll_inode_info *
agl_first_entry(struct ll_statahead_info *sai)
{
	return list_entry(sai->sai_entries_agl.next,
			  struct ll_inode_info, lli_agl_list);
}

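/* The statahead window is full once the number of cached entries reaches
 * the current limit sai_max. */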
static inline int sa_sent_full(struct ll_statahead_info *sai)
{
	return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
}

static inline int sa_received_empty(struct ll_statahead_info *sai)
{
	return list_empty(&sai->sai_entries_received);
}

static inline int agl_list_empty(struct ll_statahead_info *sai)
{
	return list_empty(&sai->sai_entries_agl);
}

/**
 * The hit rate is considered low when either:
 * (1) the hit ratio is less than 80%, or
 * (2) there are more than 8 consecutive misses.
 */
static inline int sa_low_hit(struct ll_statahead_info *sai)
{
	return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
		(sai->sai_consecutive_miss > 8));
}

/*
 * If the given index falls behind the statahead window by more than
 * SA_OMITTED_ENTRY_MAX entries, it is old.
 */
static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
{
	return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
		 sai->sai_index);
}

/*
 * Insert it into the sai_entries tail at init time.
 */
static struct ll_sa_entry *
ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
		  const char *name, int len)
{
	struct ll_inode_info *lli;
	struct ll_sa_entry   *entry;
	int		      entry_size;
	char		     *dname;

	entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
	entry = kzalloc(entry_size, GFP_NOFS);
	if (unlikely(!entry))
		return ERR_PTR(-ENOMEM);

	CDEBUG(D_READA, "alloc sa entry %.*s(%p) index %llu\n",
	       len, name, entry, index);

	entry->se_index = index;

	/*
	 * Statahead entry reference rules:
	 *
	 * 1) When a statahead entry is initialized, its reference is set to 2.
	 *    One reference is used by the directory scanner. When the scanner
	 *    searches the statahead cache for the given name, it can perform a
	 *    lockless hash lookup (only the scanner can remove an entry from
	 *    the hash list), and once found, it need not call "atomic_inc()"
	 *    on the entry reference, which improves performance. After using
	 *    the statahead entry, the scanner calls "atomic_dec()" to drop the
	 *    reference held since initialization. If it is the last reference,
	 *    the statahead entry is freed.
	 *
	 * 2) All other threads, including the statahead thread and the ptlrpcd
	 *    thread, must hold a reference on the entry while processing it,
	 *    to guarantee that it is not released by the directory scanner.
	 *    After processing the entry, these threads drop the reference. If
	 *    it is the last reference, the entry is freed.
	 *
	 *    The second reference taken at initialization time is used by the
	 *    statahead thread, following rule 2).
	 */
	atomic_set(&entry->se_refcount, 2);
	entry->se_stat = SA_ENTRY_INIT;
	entry->se_size = entry_size;
	dname = (char *)entry + sizeof(struct ll_sa_entry);
	memcpy(dname, name, len);
	dname[len] = 0;
	entry->se_qstr.hash = full_name_hash(name, len);
	entry->se_qstr.len = len;
	entry->se_qstr.name = dname;

	lli = ll_i2info(sai->sai_inode);
	spin_lock(&lli->lli_sa_lock);
	list_add_tail(&entry->se_link, &sai->sai_entries);
	INIT_LIST_HEAD(&entry->se_list);
	ll_sa_entry_enhash(sai, entry);
	spin_unlock(&lli->lli_sa_lock);

	atomic_inc(&sai->sai_cache_count);

	return entry;
}

/*
 * Used by the directory scanner to search for an entry by name.
 *
 * Only the caller can remove the entry from the hash, so it is unnecessary to
 * hold the hash lock. It is the caller's duty to release the init refcount on
 * the entry, so it is also unnecessary to increase the refcount here.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
{
	struct ll_sa_entry *entry;
	int i = ll_sa_entry_hash(qstr->hash);

	list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
		if (entry->se_qstr.hash == qstr->hash &&
		    entry->se_qstr.len == qstr->len &&
		    memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
			return entry;
	}
	return NULL;
}

/*
 * Used by the async getattr request callback to find an entry by index.
 *
 * Called with lli_sa_lock held to prevent others from changing the list
 * during the search. The entry refcount is increased before returning, to
 * guarantee that the entry cannot be freed by others.
 */
static struct ll_sa_entry *
ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
{
	struct ll_sa_entry *entry;

	list_for_each_entry(entry, &sai->sai_entries, se_link) {
		if (entry->se_index == index) {
			LASSERT(atomic_read(&entry->se_refcount) > 0);
			atomic_inc(&entry->se_refcount);
			return entry;
		}
		if (entry->se_index > index)
			break;
	}
	return NULL;
}

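/* Release the async getattr resources (intent and RPC reply) still attached
 * to the entry. */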
static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
				struct ll_sa_entry *entry)
{
	struct md_enqueue_info *minfo = entry->se_minfo;
	struct ptlrpc_request  *req   = entry->se_req;

	if (minfo) {
		entry->se_minfo = NULL;
		ll_intent_release(&minfo->mi_it);
		iput(minfo->mi_dir);
		OBD_FREE_PTR(minfo);
	}

	if (req) {
		entry->se_req = NULL;
		ptlrpc_req_finished(req);
	}
}

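/* Drop one reference on the entry; the last put releases its resources and
 * frees it. */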
static void ll_sa_entry_put(struct ll_statahead_info *sai,
			    struct ll_sa_entry *entry)
{
	if (atomic_dec_and_test(&entry->se_refcount)) {
		CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
		       entry->se_qstr.len, entry->se_qstr.name, entry,
		       entry->se_index);

		LASSERT(list_empty(&entry->se_link));
		LASSERT(list_empty(&entry->se_list));
		LASSERT(ll_sa_entry_unhashed(entry));

		ll_sa_entry_cleanup(sai, entry);
		iput(entry->se_inode);

		OBD_FREE(entry, entry->se_size);
		atomic_dec(&sai->sai_cache_count);
	}
}

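/* Unhash the entry, mark it for destruction, unlink it from all lists, and
 * drop the reference taken at initialization. */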
static inline void
do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

	LASSERT(!ll_sa_entry_unhashed(entry));
	LASSERT(!list_empty(&entry->se_link));

	ll_sa_entry_unhash(sai, entry);

	spin_lock(&lli->lli_sa_lock);
	entry->se_stat = SA_ENTRY_DEST;
	list_del_init(&entry->se_link);
	if (likely(!list_empty(&entry->se_list)))
		list_del_init(&entry->se_list);
	spin_unlock(&lli->lli_sa_lock);

	ll_sa_entry_put(sai, entry);
}

/*
 * Delete it from the sai_entries_stated list at fini time.
 */
static void
ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ll_sa_entry *pos, *next;

	if (entry)
		do_sa_entry_fini(sai, entry);

	/* drop old entries; only the 'scanner' process does this, no need to
	 * lock */
	list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
		if (!is_omitted_entry(sai, pos->se_index))
			break;
		do_sa_entry_fini(sai, pos);
	}
}

/*
 * Called with lli_sa_lock held.
 */
static void
do_sa_entry_to_stated(struct ll_statahead_info *sai,
		      struct ll_sa_entry *entry, se_stat_t stat)
{
	struct ll_sa_entry *se;
	struct list_head   *pos = &sai->sai_entries_stated;

	if (!list_empty(&entry->se_list))
		list_del_init(&entry->se_list);

	list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
		if (se->se_index < entry->se_index) {
			pos = &se->se_list;
			break;
		}
	}

	list_add(&entry->se_list, pos);
	entry->se_stat = stat;
}

/*
 * Move the entry to sai_entries_stated, keeping the list sorted by index.
 * \retval 1    -- entry is to be destroyed.
 * \retval 0    -- entry was inserted into the stated list.
 */
static int
ll_sa_entry_to_stated(struct ll_statahead_info *sai,
		      struct ll_sa_entry *entry, se_stat_t stat)
{
	struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
	int		      ret = 1;

	ll_sa_entry_cleanup(sai, entry);

	spin_lock(&lli->lli_sa_lock);
	if (likely(entry->se_stat != SA_ENTRY_DEST)) {
		do_sa_entry_to_stated(sai, entry, stat);
		ret = 0;
	}
	spin_unlock(&lli->lli_sa_lock);

	return ret;
}

/*
 * Insert the inode into the sai_entries_agl list.
 */
static void ll_agl_add(struct ll_statahead_info *sai,
		       struct inode *inode, int index)
{
	struct ll_inode_info *child  = ll_i2info(inode);
	struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
	int		      added  = 0;

	spin_lock(&child->lli_agl_lock);
	if (child->lli_agl_index == 0) {
		child->lli_agl_index = index;
		spin_unlock(&child->lli_agl_lock);

		LASSERT(list_empty(&child->lli_agl_list));

		igrab(inode);
		spin_lock(&parent->lli_agl_lock);
		if (agl_list_empty(sai))
			added = 1;
		list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
		spin_unlock(&parent->lli_agl_lock);
	} else {
		spin_unlock(&child->lli_agl_lock);
	}

	if (added > 0)
		wake_up(&sai->sai_agl_thread.t_ctl_waitq);
}

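/* Allocate a statahead info and initialize its generation, wait queues,
 * entry lists and per-bucket cache locks. It starts with one reference. */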
static struct ll_statahead_info *ll_sai_alloc(void)
{
	struct ll_statahead_info *sai;
	int			  i;

	sai = kzalloc(sizeof(*sai), GFP_NOFS);
	if (!sai)
		return NULL;

	atomic_set(&sai->sai_refcount, 1);

	spin_lock(&sai_generation_lock);
	sai->sai_generation = ++sai_generation;
	if (unlikely(sai_generation == 0))
		sai->sai_generation = ++sai_generation;
	spin_unlock(&sai_generation_lock);

	sai->sai_max = LL_SA_RPC_MIN;
	sai->sai_index = 1;
	init_waitqueue_head(&sai->sai_waitq);
	init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
	init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);

	INIT_LIST_HEAD(&sai->sai_entries);
	INIT_LIST_HEAD(&sai->sai_entries_received);
	INIT_LIST_HEAD(&sai->sai_entries_stated);
	INIT_LIST_HEAD(&sai->sai_entries_agl);

	for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
		INIT_LIST_HEAD(&sai->sai_cache[i]);
		spin_lock_init(&sai->sai_cache_lock[i]);
	}
	atomic_set(&sai->sai_cache_count, 0);

	return sai;
}

static inline struct ll_statahead_info *
ll_sai_get(struct ll_statahead_info *sai)
{
	atomic_inc(&sai->sai_refcount);
	return sai;
}

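/* Drop a reference on the sai. The last put detaches it from the directory
 * inode, finalizes all remaining entries and frees the sai itself. */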
static void ll_sai_put(struct ll_statahead_info *sai)
{
	struct inode	     *inode = sai->sai_inode;
	struct ll_inode_info *lli   = ll_i2info(inode);

	if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
		struct ll_sa_entry *entry, *next;

		if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
			/* It is a race case; the interpret callback just
			 * holds a reference count */
			spin_unlock(&lli->lli_sa_lock);
			return;
		}

		LASSERT(lli->lli_opendir_key == NULL);
		LASSERT(thread_is_stopped(&sai->sai_thread));
		LASSERT(thread_is_stopped(&sai->sai_agl_thread));

		lli->lli_sai = NULL;
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);

		if (sai->sai_sent > sai->sai_replied)
			CDEBUG(D_READA, "statahead for dir "DFID
			      " does not finish: [sent:%llu] [replied:%llu]\n",
			      PFID(&lli->lli_fid),
			      sai->sai_sent, sai->sai_replied);

		list_for_each_entry_safe(entry, next,
					 &sai->sai_entries, se_link)
			do_sa_entry_fini(sai, entry);

		LASSERT(list_empty(&sai->sai_entries));
		LASSERT(sa_received_empty(sai));
		LASSERT(list_empty(&sai->sai_entries_stated));

		LASSERT(atomic_read(&sai->sai_cache_count) == 0);
		LASSERT(agl_list_empty(sai));

		iput(inode);
		OBD_FREE_PTR(sai);
	}
}

/* Do NOT forget to drop the inode refcount when an inode leaves
 * sai_entries_agl. */
static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
{
	struct ll_inode_info *lli   = ll_i2info(inode);
	__u64		      index = lli->lli_agl_index;
	int		      rc;

	LASSERT(list_empty(&lli->lli_agl_list));

	/* AGL may fall behind statahead by one entry */
	if (is_omitted_entry(sai, index + 1)) {
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	/* Someone is in glimpse (sync or async), do nothing. */
	rc = down_write_trylock(&lli->lli_glimpse_sem);
	if (rc == 0) {
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	/*
	 * Someone triggered a glimpse within the last second.
	 * 1) The former glimpse succeeded with a glimpse lock granted by the
	 *    OST; if the lock is still cached on the client, AGL needs to do
	 *    nothing. If it was cancelled by another client, AGL may be
	 *    unable to obtain a new lock, since no glimpse callback is
	 *    triggered by AGL.
	 * 2) The former glimpse succeeded, but the OST did not grant a
	 *    glimpse lock. In that case, it is quite possible that the OST
	 *    will not grant a glimpse lock for AGL either.
	 * 3) The former glimpse failed. Compared with the other two cases,
	 *    this is relatively rare. AGL can ignore such cases without much
	 *    impact on performance.
	 */
	if (lli->lli_glimpse_time != 0 &&
	    time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
		up_write(&lli->lli_glimpse_sem);
		lli->lli_agl_index = 0;
		iput(inode);
		return;
	}

	CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
	       DFID", idx = %llu\n", PFID(&lli->lli_fid), index);

	cl_agl(inode);
	lli->lli_agl_index = 0;
	lli->lli_glimpse_time = cfs_time_current();
	up_write(&lli->lli_glimpse_sem);

	CDEBUG(D_READA, "Handled (init) async glimpse: inode = "
	       DFID", idx = %llu, rc = %d\n",
	       PFID(&lli->lli_fid), index, rc);

	iput(inode);
}

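/* Handle one entry from sai_entries_received: validate the getattr reply,
 * instantiate the child inode, and move the entry to the stated list. */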
static void ll_post_statahead(struct ll_statahead_info *sai)
{
	struct inode	       *dir   = sai->sai_inode;
	struct inode	       *child;
	struct ll_inode_info   *lli   = ll_i2info(dir);
	struct ll_sa_entry     *entry;
	struct md_enqueue_info *minfo;
	struct lookup_intent   *it;
	struct ptlrpc_request  *req;
	struct mdt_body	       *body;
	int			rc    = 0;

	spin_lock(&lli->lli_sa_lock);
	if (unlikely(sa_received_empty(sai))) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}
	entry = sa_first_received_entry(sai);
	atomic_inc(&entry->se_refcount);
	list_del_init(&entry->se_list);
	spin_unlock(&lli->lli_sa_lock);

	LASSERT(entry->se_handle != 0);

	minfo = entry->se_minfo;
	it = &minfo->mi_it;
	req = entry->se_req;
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL) {
		rc = -EFAULT;
		goto out;
	}

	child = entry->se_inode;
	if (child == NULL) {
		/*
		 * lookup.
		 */
		LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));

		/* XXX: No fid in the reply, this is probably a cross-ref
		 * case. SA can't handle it yet. */
		if (body->valid & OBD_MD_MDS) {
			rc = -EAGAIN;
			goto out;
		}
	} else {
		/*
		 * revalidate.
		 */
		/* unlinked and re-created with the same name */
		if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))) {
			entry->se_inode = NULL;
			iput(child);
			child = NULL;
		}
	}

	it->d.lustre.it_lock_handle = entry->se_handle;
	rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
	if (rc != 1) {
		rc = -EAGAIN;
		goto out;
	}

	rc = ll_prep_inode(&child, req, dir->i_sb, it);
	if (rc)
		goto out;

	CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
	       child, child->i_ino, child->i_generation);
	ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);

	entry->se_inode = child;

	if (agl_should_run(sai, child))
		ll_agl_add(sai, child, entry->se_index);

out:
	/* "ll_sa_entry_to_stated()" drops the related ldlm ibits lock
	 * reference count by calling "ll_intent_drop_lock()", whether the
	 * above operations failed or not. Do not worry about calling
	 * "ll_intent_drop_lock()" more than once. */
	rc = ll_sa_entry_to_stated(sai, entry,
				   rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
	if (rc == 0 && entry->se_index == sai->sai_index_wait)
		wake_up(&sai->sai_waitq);
	ll_sa_entry_put(sai, entry);
}

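/* Interpret callback for the async getattr RPC: on success, stash the reply
 * on the matching entry and queue it on sai_entries_received for the
 * statahead thread to post-process. */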
static int ll_statahead_interpret(struct ptlrpc_request *req,
				  struct md_enqueue_info *minfo, int rc)
{
	struct lookup_intent	 *it  = &minfo->mi_it;
	struct inode		 *dir = minfo->mi_dir;
	struct ll_inode_info	 *lli = ll_i2info(dir);
	struct ll_statahead_info *sai = NULL;
	struct ll_sa_entry	 *entry;
	__u64			  handle = 0;
	int			  wakeup;

	if (it_disposition(it, DISP_LOOKUP_NEG))
		rc = -ENOENT;

	if (rc == 0) {
		/* release the ibits lock ASAP to avoid deadlock when the
		 * statahead thread enqueues a lock on the parent in readdir
		 * and another process enqueues a lock on a child with the
		 * parent lock held, e.g. unlink. */
		handle = it->d.lustre.it_lock_handle;
		ll_intent_drop_lock(it);
	}

	spin_lock(&lli->lli_sa_lock);
	/* stale entry */
	if (unlikely(lli->lli_sai == NULL ||
		     lli->lli_sai->sai_generation != minfo->mi_generation)) {
		spin_unlock(&lli->lli_sa_lock);
		rc = -ESTALE;
		goto out;
	} else {
		sai = ll_sai_get(lli->lli_sai);
		if (unlikely(!thread_is_running(&sai->sai_thread))) {
			sai->sai_replied++;
			spin_unlock(&lli->lli_sa_lock);
			rc = -EBADFD;
			goto out;
		}

		entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
		if (entry == NULL) {
			sai->sai_replied++;
			spin_unlock(&lli->lli_sa_lock);
			rc = -EIDRM;
			goto out;
		}

		if (rc != 0) {
			do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
			wakeup = (entry->se_index == sai->sai_index_wait);
		} else {
			entry->se_minfo = minfo;
			entry->se_req = ptlrpc_request_addref(req);
			/* Release the async ibits lock ASAP to avoid deadlock
			 * when the statahead thread tries to enqueue a lock on
			 * the parent for readpage while another thread tries
			 * to enqueue a lock on a child with the parent's lock
			 * held, for example: unlink. */
			entry->se_handle = handle;
			wakeup = sa_received_empty(sai);
			list_add_tail(&entry->se_list,
				      &sai->sai_entries_received);
		}
		sai->sai_replied++;
		spin_unlock(&lli->lli_sa_lock);

		ll_sa_entry_put(sai, entry);
		if (wakeup)
			wake_up(&sai->sai_thread.t_ctl_waitq);
	}

out:
	if (rc != 0) {
		ll_intent_release(it);
		iput(dir);
		OBD_FREE_PTR(minfo);
	}
	if (sai != NULL)
		ll_sai_put(sai);
	return rc;
}

static void sa_args_fini(struct md_enqueue_info *minfo,
			 struct ldlm_enqueue_info *einfo)
{
	LASSERT(minfo && einfo);
	iput(minfo->mi_dir);
	capa_put(minfo->mi_data.op_capa1);
	capa_put(minfo->mi_data.op_capa2);
	OBD_FREE_PTR(minfo);
	OBD_FREE_PTR(einfo);
}

/**
 * There is a race condition between "capa_put" and "ll_statahead_interpret"
 * when accessing "op_data.op_capa[1,2]", as follows:
 * "capa_put" releases "op_data.op_capa[1,2]"'s reference count after calling
 * "md_intent_getattr_async". But "ll_statahead_interpret" may run first and
 * fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to access an
 * invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa" here, before
 * calling "md_intent_getattr_async".
 */
static int sa_args_init(struct inode *dir, struct inode *child,
			struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
			struct ldlm_enqueue_info **pei,
			struct obd_capa **pcapa)
{
	struct qstr		 *qstr = &entry->se_qstr;
	struct ll_inode_info	 *lli  = ll_i2info(dir);
	struct md_enqueue_info	 *minfo;
	struct ldlm_enqueue_info *einfo;
	struct md_op_data	 *op_data;

	einfo = kzalloc(sizeof(*einfo), GFP_NOFS);
	if (!einfo)
		return -ENOMEM;

	minfo = kzalloc(sizeof(*minfo), GFP_NOFS);
	if (!minfo) {
		OBD_FREE_PTR(einfo);
		return -ENOMEM;
	}

	op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
				     qstr->len, 0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		OBD_FREE_PTR(einfo);
		OBD_FREE_PTR(minfo);
		return PTR_ERR(op_data);
	}

	minfo->mi_it.it_op = IT_GETATTR;
	minfo->mi_dir = igrab(dir);
	minfo->mi_cb = ll_statahead_interpret;
	minfo->mi_generation = lli->lli_sai->sai_generation;
	minfo->mi_cbdata = entry->se_index;

	einfo->ei_type   = LDLM_IBITS;
	einfo->ei_mode   = it_to_lock_mode(&minfo->mi_it);
	einfo->ei_cb_bl  = ll_md_blocking_ast;
	einfo->ei_cb_cp  = ldlm_completion_ast;
	einfo->ei_cb_gl  = NULL;
	einfo->ei_cbdata = NULL;

	*pmi = minfo;
	*pei = einfo;
	pcapa[0] = op_data->op_capa1;
	pcapa[1] = op_data->op_capa2;

	return 0;
}

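/* Send an async getattr (lookup flavor) for an entry that has no dentry in
 * the dcache yet. */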
static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
{
	struct md_enqueue_info	 *minfo;
	struct ldlm_enqueue_info *einfo;
	struct obd_capa		 *capas[2];
	int			  rc;

	rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
	if (rc)
		return rc;

	rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
	if (!rc) {
		capa_put(capas[0]);
		capa_put(capas[1]);
	} else {
		sa_args_fini(minfo, einfo);
	}

	return rc;
}

/**
 * Similar to ll_revalidate_it().
 * \retval      1 -- dentry valid
 * \retval      0 -- will send a stat-ahead request
 * \retval others -- preparing the stat-ahead request failed
 */
static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
			    struct dentry *dentry)
{
	struct inode		 *inode = d_inode(dentry);
	struct lookup_intent	  it = { .it_op = IT_GETATTR,
					 .d.lustre.it_lock_handle = 0 };
	struct md_enqueue_info	 *minfo;
	struct ldlm_enqueue_info *einfo;
	struct obd_capa		 *capas[2];
	int rc;

	if (unlikely(inode == NULL))
		return 1;

	if (d_mountpoint(dentry))
		return 1;

	entry->se_inode = igrab(inode);
	rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
				NULL);
	if (rc == 1) {
		entry->se_handle = it.d.lustre.it_lock_handle;
		ll_intent_release(&it);
		return 1;
	}

	rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
	if (rc) {
		entry->se_inode = NULL;
		iput(inode);
		return rc;
	}

	rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
	if (!rc) {
		capa_put(capas[0]);
		capa_put(capas[1]);
	} else {
		entry->se_inode = NULL;
		iput(inode);
		sa_args_fini(minfo, einfo);
	}

	return rc;
}

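/* Issue one stat-ahead request for the given name: look the dentry up in
 * the dcache and either revalidate it or send a fresh async lookup. */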
static void ll_statahead_one(struct dentry *parent, const char *entry_name,
			     int entry_name_len)
{
	struct inode		 *dir    = d_inode(parent);
	struct ll_inode_info	 *lli    = ll_i2info(dir);
	struct ll_statahead_info *sai    = lli->lli_sai;
	struct dentry		 *dentry = NULL;
	struct ll_sa_entry	 *entry;
	int			  rc;
	int			  rc1;

	entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
				  entry_name_len);
	if (IS_ERR(entry))
		return;

	dentry = d_lookup(parent, &entry->se_qstr);
	if (!dentry) {
		rc = do_sa_lookup(dir, entry);
	} else {
		rc = do_sa_revalidate(dir, entry, dentry);
		if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
			ll_agl_add(sai, d_inode(dentry), entry->se_index);
	}

	if (dentry != NULL)
		dput(dentry);

	if (rc) {
		rc1 = ll_sa_entry_to_stated(sai, entry,
					rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
		if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
			wake_up(&sai->sai_waitq);
	} else {
		sai->sai_sent++;
	}

	sai->sai_index++;
	/* drop the one refcount taken by ll_sa_entry_alloc */
	ll_sa_entry_put(sai, entry);
}

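/* Main loop of the AGL thread: pull inodes off sai_entries_agl and trigger
 * async glimpse locks until told to stop, then drain the remaining list. */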
static int ll_agl_thread(void *arg)
{
	struct dentry		 *parent = (struct dentry *)arg;
	struct inode		 *dir    = d_inode(parent);
	struct ll_inode_info	 *plli   = ll_i2info(dir);
	struct ll_inode_info	 *clli;
	struct ll_sb_info	 *sbi    = ll_i2sbi(dir);
	struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread	 *thread = &sai->sai_agl_thread;
	struct l_wait_info	  lwi    = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n",
	       sai, parent);

	atomic_inc(&sbi->ll_agl_total);
	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 1;
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting. */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_agl_lock);
	wake_up(&thread->t_ctl_waitq);

	while (1) {
		l_wait_event(thread->t_ctl_waitq,
			     !agl_list_empty(sai) ||
			     !thread_is_running(thread),
			     &lwi);

		if (!thread_is_running(thread))
			break;

		spin_lock(&plli->lli_agl_lock);
		/* The statahead thread may help to process AGL entries,
		 * so check whether the list is empty again. */
		if (!agl_list_empty(sai)) {
			clli = agl_first_entry(sai);
			list_del_init(&clli->lli_agl_list);
			spin_unlock(&plli->lli_agl_lock);
			ll_agl_trigger(&clli->lli_vfs_inode, sai);
		} else {
			spin_unlock(&plli->lli_agl_lock);
		}
	}

	spin_lock(&plli->lli_agl_lock);
	sai->sai_agl_valid = 0;
	while (!agl_list_empty(sai)) {
		clli = agl_first_entry(sai);
		list_del_init(&clli->lli_agl_list);
		spin_unlock(&plli->lli_agl_lock);
		clli->lli_agl_index = 0;
		iput(&clli->lli_vfs_inode);
		spin_lock(&plli->lli_agl_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_agl_lock);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	return 0;
}

static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
{
	struct ptlrpc_thread *thread = &sai->sai_agl_thread;
	struct l_wait_info    lwi    = { 0 };
	struct ll_inode_info *plli;
	struct task_struct   *task;

	CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
	       sai, parent);

	plli = ll_i2info(d_inode(parent));
	task = kthread_run(ll_agl_thread, parent,
			   "ll_agl_%u", plli->lli_opendir_pid);
	if (IS_ERR(task)) {
		CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
		thread_set_flags(thread, SVC_STOPPED);
		return;
	}

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);
}

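/* Main loop of the statahead thread: walk the directory pages, fire one
 * async getattr per dirent, post-process the replies, and help process AGL
 * entries whenever the statahead window is full. */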
static int ll_statahead_thread(void *arg)
{
	struct dentry		 *parent = (struct dentry *)arg;
	struct inode		 *dir    = d_inode(parent);
	struct ll_inode_info	 *plli   = ll_i2info(dir);
	struct ll_inode_info	 *clli;
	struct ll_sb_info	 *sbi    = ll_i2sbi(dir);
	struct ll_statahead_info *sai    = ll_sai_get(plli->lli_sai);
	struct ptlrpc_thread	 *thread = &sai->sai_thread;
	struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
	struct page		 *page;
	__u64			  pos    = 0;
	int			  first  = 0;
	int			  rc     = 0;
	struct ll_dir_chain	  chain;
	struct l_wait_info	  lwi    = { 0 };

	thread->t_pid = current_pid();
	CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n",
	       sai, parent);

	if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
		ll_start_agl(parent, sai);

	atomic_inc(&sbi->ll_sa_total);
	spin_lock(&plli->lli_sa_lock);
	if (thread_is_init(thread))
		/* If someone else has changed the thread state
		 * (e.g. already changed to SVC_STOPPING), we can't just
		 * blindly overwrite that setting. */
		thread_set_flags(thread, SVC_RUNNING);
	spin_unlock(&plli->lli_sa_lock);
	wake_up(&thread->t_ctl_waitq);

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			rc = PTR_ERR(page);
			CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos, sai->sai_index,
			       rc, plli->lli_opendir_pid);
			goto out;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent != NULL;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			if (unlikely(hash < pos))
				/*
				 * Skip until we find the target hash value.
				 */
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * Skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1) {
					/*
					 * skip "."
					 */
					continue;
				} else if (name[1] == '.' && namelen == 2) {
					/*
					 * skip ".."
					 */
					continue;
				} else if (!sai->sai_ls_all) {
					/*
					 * skip hidden files.
					 */
					sai->sai_skip_hidden++;
					continue;
				}
			}

			/*
			 * don't stat-ahead the first entry.
			 */
			if (unlikely(++first == 1))
				continue;

keep_it:
			l_wait_event(thread->t_ctl_waitq,
				     !sa_sent_full(sai) ||
				     !sa_received_empty(sai) ||
				     !agl_list_empty(sai) ||
				     !thread_is_running(thread),
				     &lwi);

interpret_it:
			while (!sa_received_empty(sai))
				ll_post_statahead(sai);

			if (unlikely(!thread_is_running(thread))) {
				ll_release_page(page, 0);
				rc = 0;
				goto out;
			}

			/* If there is no window for metadata statahead, but
			 * there are some AGL entries to be triggered, then
			 * try to help process the AGL entries. */
			if (sa_sent_full(sai)) {
				spin_lock(&plli->lli_agl_lock);
				while (!agl_list_empty(sai)) {
					clli = agl_first_entry(sai);
					list_del_init(&clli->lli_agl_list);
					spin_unlock(&plli->lli_agl_lock);
					ll_agl_trigger(&clli->lli_vfs_inode,
						       sai);

					if (!sa_received_empty(sai))
						goto interpret_it;

					if (unlikely(
						!thread_is_running(thread))) {
						ll_release_page(page, 0);
						rc = 0;
						goto out;
					}

					if (!sa_sent_full(sai))
						goto do_it;

					spin_lock(&plli->lli_agl_lock);
				}
				spin_unlock(&plli->lli_agl_lock);

				goto keep_it;
			}

do_it:
			ll_statahead_one(parent, name, namelen);
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			while (1) {
				l_wait_event(thread->t_ctl_waitq,
					     !sa_received_empty(sai) ||
					     sai->sai_sent == sai->sai_replied ||
					     !thread_is_running(thread),
					     &lwi);

				while (!sa_received_empty(sai))
					ll_post_statahead(sai);

				if (unlikely(!thread_is_running(thread))) {
					rc = 0;
					goto out;
				}

				if (sai->sai_sent == sai->sai_replied &&
				    sa_received_empty(sai))
					break;
			}

			spin_lock(&plli->lli_agl_lock);
			while (!agl_list_empty(sai) &&
			       thread_is_running(thread)) {
				clli = agl_first_entry(sai);
				list_del_init(&clli->lli_agl_list);
				spin_unlock(&plli->lli_agl_lock);
				ll_agl_trigger(&clli->lli_vfs_inode, sai);
				spin_lock(&plli->lli_agl_lock);
			}
			spin_unlock(&plli->lli_agl_lock);

			rc = 0;
			goto out;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
			/*
			 * go into the overflow page.
			 */
		}
	}

out:
	if (sai->sai_agl_valid) {
		spin_lock(&plli->lli_agl_lock);
		thread_set_flags(agl_thread, SVC_STOPPING);
		spin_unlock(&plli->lli_agl_lock);
		wake_up(&agl_thread->t_ctl_waitq);

		CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n",
		       sai, (unsigned int)agl_thread->t_pid);
		l_wait_event(agl_thread->t_ctl_waitq,
			     thread_is_stopped(agl_thread),
			     &lwi);
	} else {
		/* Set the agl_thread flags anyway. */
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
	}
	ll_dir_chain_fini(&chain);
	spin_lock(&plli->lli_sa_lock);
	if (!sa_received_empty(sai)) {
		thread_set_flags(thread, SVC_STOPPING);
		spin_unlock(&plli->lli_sa_lock);

		/* Release the resources held by received entries. */
		while (!sa_received_empty(sai))
			ll_post_statahead(sai);

		spin_lock(&plli->lli_sa_lock);
	}
	thread_set_flags(thread, SVC_STOPPED);
	spin_unlock(&plli->lli_sa_lock);
	wake_up(&sai->sai_waitq);
	wake_up(&thread->t_ctl_waitq);
	ll_sai_put(sai);
	CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n",
	       sai, parent);
	dput(parent);
	return rc;
}

/**
 * Called in ll_file_release().
 */
void ll_stop_statahead(struct inode *dir, void *key)
{
	struct ll_inode_info *lli = ll_i2info(dir);

	if (unlikely(key == NULL))
		return;

	spin_lock(&lli->lli_sa_lock);
	if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
		spin_unlock(&lli->lli_sa_lock);
		return;
	}

	lli->lli_opendir_key = NULL;

	if (lli->lli_sai) {
		struct l_wait_info lwi = { 0 };
		struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;

		if (!thread_is_stopped(thread)) {
			thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lli->lli_sa_lock);
			wake_up(&thread->t_ctl_waitq);

			CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n",
			       lli->lli_sai, (unsigned int)thread->t_pid);
			l_wait_event(thread->t_ctl_waitq,
				     thread_is_stopped(thread),
				     &lwi);
		} else {
			spin_unlock(&lli->lli_sa_lock);
		}

		/*
		 * Put the ref which was held when statahead_enter was first
		 * called. It may not be the last ref, since some statahead
		 * requests may still be in flight.
		 */
		ll_sai_put(lli->lli_sai);
	} else {
		lli->lli_opendir_pid = 0;
		spin_unlock(&lli->lli_sa_lock);
	}
}

enum {
	/**
	 * not the first dirent, or is "."
	 */
	LS_NONE_FIRST_DE = 0,
	/**
	 * the first non-hidden dirent
	 */
	LS_FIRST_DE,
	/**
	 * the first hidden dirent, that is, a dot-file
	 */
	LS_FIRST_DOT_DE
};

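/* Scan the directory to check whether the target dentry is its first dirent
 * (or first hidden dirent), which suggests an "ls [-a]l"-style scan that is
 * worth starting statahead for. */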
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
	struct ll_dir_chain  chain;
	struct qstr	    *target = &dentry->d_name;
	struct page	    *page;
	__u64		     pos    = 0;
	int		     dot_de;
	int		     rc     = LS_NONE_FIRST_DE;

	ll_dir_chain_init(&chain);
	page = ll_get_dir_page(dir, pos, &chain);

	while (1) {
		struct lu_dirpage *dp;
		struct lu_dirent  *ent;

		if (IS_ERR(page)) {
			struct ll_inode_info *lli = ll_i2info(dir);

			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %llu: [rc %d] [parent %u]\n",
			       PFID(ll_inode2fid(dir)), pos,
			       rc, lli->lli_opendir_pid);
			break;
		}

		dp = page_address(page);
		for (ent = lu_dirent_start(dp); ent != NULL;
		     ent = lu_dirent_next(ent)) {
			__u64 hash;
			int namelen;
			char *name;

			hash = le64_to_cpu(ent->lde_hash);
			/* ll_get_dir_page() can return any page containing
			 * the given hash, which may not be the start hash. */
			if (unlikely(hash < pos))
				continue;

			namelen = le16_to_cpu(ent->lde_namelen);
			if (unlikely(namelen == 0))
				/*
				 * skip dummy record.
				 */
				continue;

			name = ent->lde_name;
			if (name[0] == '.') {
				if (namelen == 1)
					/*
					 * skip "."
					 */
					continue;
				else if (name[1] == '.' && namelen == 2)
					/*
					 * skip ".."
					 */
					continue;
				else
					dot_de = 1;
			} else {
				dot_de = 0;
			}

			if (dot_de && target->name[0] != '.') {
				CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
				       target->len, target->name,
				       namelen, name);
				continue;
			}

			if (target->len != namelen ||
			    memcmp(target->name, name, namelen) != 0)
				rc = LS_NONE_FIRST_DE;
			else if (!dot_de)
				rc = LS_FIRST_DE;
			else
				rc = LS_FIRST_DOT_DE;

			ll_release_page(page, 0);
			goto out;
		}
		pos = le64_to_cpu(dp->ldp_hash_end);
		if (pos == MDS_DIR_END_OFF) {
			/*
			 * End of directory reached.
			 */
			ll_release_page(page, 0);
			break;
		} else if (1) {
			/*
			 * chain is exhausted.
			 * Normal case: continue to the next page.
			 */
			ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
					      LDF_COLLIDE);
			page = ll_get_dir_page(dir, pos, &chain);
		} else {
			/*
			 * go into the overflow page.
			 */
			LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
			ll_release_page(page, 1);
		}
	}

out:
	ll_dir_chain_fini(&chain);
	return rc;
}

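/* Account a cache hit or miss for the entry, adapt the statahead window
 * (sai_max), and stop the statahead thread if the hit rate is too low. */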
static void
ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
	struct ptlrpc_thread *thread = &sai->sai_thread;
	struct ll_sb_info    *sbi    = ll_i2sbi(sai->sai_inode);
	int		      hit;

	if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
		hit = 1;
	else
		hit = 0;

	ll_sa_entry_fini(sai, entry);
	if (hit) {
		sai->sai_hit++;
		sai->sai_consecutive_miss = 0;
		sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
	} else {
		struct ll_inode_info *lli = ll_i2info(sai->sai_inode);

		sai->sai_miss++;
		sai->sai_consecutive_miss++;
		if (sa_low_hit(sai) && thread_is_running(thread)) {
			atomic_inc(&sbi->ll_sa_wrong);
			CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n",
			       PFID(&lli->lli_fid), sai->sai_hit,
			       sai->sai_miss, sai->sai_sent,
			       sai->sai_replied);
			spin_lock(&lli->lli_sa_lock);
			if (!thread_is_stopped(thread))
				thread_set_flags(thread, SVC_STOPPING);
			spin_unlock(&lli->lli_sa_lock);
		}
	}

	if (!thread_is_stopped(thread))
		wake_up(&thread->t_ctl_waitq);
}

/**
 * Start the statahead thread if this is the first dir entry.
 * Otherwise, if a thread is started already, wait until it is ahead of me.
 * \retval 1       -- find entry with lock in cache, the caller needs to do
 *		      nothing.
 * \retval 0       -- find entry in cache, but without lock, the caller needs
 *		      to refresh from MDS.
 * \retval others  -- the caller needs to proceed as non-statahead.
 */
int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
		       int only_unplug)
{
	struct ll_inode_info	 *lli   = ll_i2info(dir);
	struct ll_statahead_info *sai   = lli->lli_sai;
	struct dentry		 *parent;
	struct ll_sa_entry	 *entry;
	struct ptlrpc_thread	 *thread;
	struct l_wait_info	  lwi   = { 0 };
	int			  rc    = 0;
	struct ll_inode_info	 *plli;

	LASSERT(lli->lli_opendir_pid == current_pid());

	if (sai) {
		thread = &sai->sai_thread;
		if (unlikely(thread_is_stopped(thread) &&
			     list_empty(&sai->sai_entries_stated))) {
			/* to release resources */
			ll_stop_statahead(dir, lli->lli_opendir_key);
			return -EAGAIN;
		}

		if ((*dentryp)->d_name.name[0] == '.') {
			if (sai->sai_ls_all ||
			    sai->sai_miss_hidden >= sai->sai_skip_hidden) {
				/*
				 * The hidden dentry is the first one, or the
				 * statahead thread has not skipped this many
				 * hidden dentries before "sai_ls_all" was
				 * enabled as below.
				 */
			} else {
				if (!sai->sai_ls_all)
					/*
					 * The hidden dentry is probably not
					 * the first one, so "sai_ls_all" was
					 * not set and "ls -al" entries were
					 * missed. Enable "sai_ls_all" for
					 * such a case.
					 */
					sai->sai_ls_all = 1;

				/*
				 * Such a "getattr" has been skipped before
				 * "sai_ls_all" was enabled as above.
				 */
				sai->sai_miss_hidden++;
				return -EAGAIN;
			}
		}

		entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
		if (entry == NULL || only_unplug) {
			ll_sai_unplug(sai, entry);
			return entry ? 1 : -EAGAIN;
		}

		if (!ll_sa_entry_stated(entry)) {
			sai->sai_index_wait = entry->se_index;
			lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
					       LWI_ON_SIGNAL_NOOP, NULL);
			rc = l_wait_event(sai->sai_waitq,
					  ll_sa_entry_stated(entry) ||
					  thread_is_stopped(thread),
					  &lwi);
			if (rc < 0) {
				ll_sai_unplug(sai, entry);
				return -EAGAIN;
			}
		}

		if (entry->se_stat == SA_ENTRY_SUCC &&
		    entry->se_inode != NULL) {
			struct inode *inode = entry->se_inode;
			struct lookup_intent it = { .it_op = IT_GETATTR,
						    .d.lustre.it_lock_handle =
						     entry->se_handle };
			__u64 bits;

			rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
						ll_inode2fid(inode), &bits);
			if (rc == 1) {
				if (d_inode(*dentryp) == NULL) {
					struct dentry *alias;

					alias = ll_splice_alias(inode,
								*dentryp);
					if (IS_ERR(alias)) {
						ll_sai_unplug(sai, entry);
						return PTR_ERR(alias);
					}
					*dentryp = alias;
				} else if (d_inode(*dentryp) != inode) {
					/* revalidate, but inode is recreated */
					CDEBUG(D_READA,
					      "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
					      *dentryp,
					      d_inode(*dentryp)->i_ino,
					      d_inode(*dentryp)->i_generation,
					      inode->i_ino,
					      inode->i_generation);
					ll_sai_unplug(sai, entry);
					return -ESTALE;
				} else {
					iput(inode);
				}
				entry->se_inode = NULL;

				if ((bits & MDS_INODELOCK_LOOKUP) &&
				    d_lustre_invalid(*dentryp))
					d_lustre_revalidate(*dentryp);
				ll_intent_release(&it);
			}
		}

		ll_sai_unplug(sai, entry);
		return rc;
	}

	/* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
	rc = is_first_dirent(dir, *dentryp);
	if (rc == LS_NONE_FIRST_DE) {
		/* It is not an "ls -{a}l" operation, no need to statahead
		 * for it. */
		rc = -EAGAIN;
		goto out;
	}

	sai = ll_sai_alloc();
	if (sai == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
	sai->sai_inode = igrab(dir);
	if (unlikely(sai->sai_inode == NULL)) {
		CWARN("Do not start stat ahead on dying inode "DFID"\n",
		      PFID(&lli->lli_fid));
		rc = -ESTALE;
		goto out;
	}

	/* get a parent reference here; it is put in ll_statahead_thread */
	parent = dget((*dentryp)->d_parent);
	if (unlikely(sai->sai_inode != d_inode(parent))) {
		struct ll_inode_info *nlli = ll_i2info(d_inode(parent));

		CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
		      *dentryp,
		      PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
		dput(parent);
		iput(sai->sai_inode);
		rc = -EAGAIN;
		goto out;
	}

	CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n",
	       sai, parent);

	/* The sai buffer already has one reference taken at allocation time,
	 * but as soon as we expose the sai by attaching it to the lli that
	 * default reference can be dropped by another thread calling
	 * ll_stop_statahead. We need to take a local reference to protect
	 * the sai buffer while we intend to access it. */
	ll_sai_get(sai);
	lli->lli_sai = sai;

	plli = ll_i2info(d_inode(parent));
	rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
				 "ll_sa_%u", plli->lli_opendir_pid));
	thread = &sai->sai_thread;
	if (IS_ERR_VALUE(rc)) {
		CERROR("can't start ll_sa thread, rc: %d\n", rc);
		dput(parent);
		lli->lli_opendir_key = NULL;
		thread_set_flags(thread, SVC_STOPPED);
		thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
		/* Drop both our own local reference and the default
		 * reference from allocation time. */
		ll_sai_put(sai);
		ll_sai_put(sai);
		LASSERT(lli->lli_sai == NULL);
		return -EAGAIN;
	}

	l_wait_event(thread->t_ctl_waitq,
		     thread_is_running(thread) || thread_is_stopped(thread),
		     &lwi);
	ll_sai_put(sai);

	/*
	 * We don't stat-ahead for the first dirent since we are already in
	 * lookup.
	 */
	return -EAGAIN;

out:
	if (sai != NULL)
		OBD_FREE_PTR(sai);
	spin_lock(&lli->lli_sa_lock);
	lli->lli_opendir_key = NULL;
	lli->lli_opendir_pid = 0;
	spin_unlock(&lli->lli_sa_lock);
	return rc;
}