/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

/*
 * The idea behind this code is rather simple. Every second, for each server
 * namespace we compute the SLV - server lock volume - from the current
 * number of granted locks, the grant speed over the past period, and so on;
 * that is, from the locking load. For simplicity, this SLV number can be
 * thought of as a flow definition. It is sent to clients at every
 * opportunity so that they know the current load situation on the server.
 * By default, at start-up, the SLV on the server is set to its maximum
 * value, which is calculated as follows: allow one client to hold all
 * ->pl_limit locks for 10h.
 *
 * On clients, the number of cached locks is no longer limited artificially
 * as it was before. Instead, the client calculates a CLV - client lock
 * volume - for each lock and compares it with the last SLV received from
 * the server. The CLV is calculated as the number of locks in the LRU
 * multiplied by the lock's live time in seconds. If CLV > SLV, the lock is
 * canceled.
 *
 * The client also has an LVF - lock volume factor - which regulates how
 * sensitive the client is to the last SLV received from the server. The
 * higher the LVF, the more locks are canceled on the client. Its default
 * value is 1; setting LVF to 2 makes the client cancel locks twice as fast.
 *
 * Locks on a client are canceled more aggressively in these cases:
 * (1) the SLV is smaller, that is, the load on the server is higher;
 * (2) the client holds a lot of locks (the more locks a client holds, the
 *     bigger the chance that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 *
 * Thus, in terms of the flow paradigm used to reason about the SLV, the CLV
 * is the volume of a particle in the flow described by the SLV. If the flow
 * gets thinner, more and more particles fall outside of it, and since the
 * particles are locks, they should be canceled. (See the worked example
 * after this comment.)
 *
 * The general idea belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed several nice ideas, such
 * as using the LVF, and many cleanups. The flow definition that makes the
 * logic easier to understand belongs to Nikita Danilov
 * (nikita@clusterfs.com), as do many cleanups and fixes. The design and
 * implementation were done by Yury Umanets (umka@clusterfs.com).
 *
 * Glossary of terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As the list above shows, there are a few tunables that can affect
 * behavior significantly. All of them can be modified via proc. They also
 * make it possible to construct a few pre-defined behavior policies. If
 * none of the pre-defined policies suits the workload at hand, a new one
 * can be "constructed" via the proc tunables.
 */
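
/*
 * A rough worked example of the above (illustrative numbers, not defaults):
 * with a server pool limit of L = 100000 locks, the initial SLV is
 * L * LDLM_POOL_MAX_AGE = 100000 * 36000 = 3.6e9 (one client may keep all
 * locks for 10h). A client holding 2000 locks in its LRU computes, for a
 * lock that has lived 300s, CLV = 2000 * 300 = 600000, far below that SLV,
 * so nothing is canceled. If server load grows and the SLV the server sends
 * drops below 600000, that lock (and any older ones) gets canceled, with
 * the LVF scaling how aggressively this happens.
 */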

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/cl_object.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "ldlm_internal.h"


/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_MAX_GSP% of all locks is the default GP.
 */
#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age (seconds) for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
        return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
        /*
         * Allow one client to have all the locks for 10 hrs.
         * The formula is: limit * 10h / 1 client.
         */
        __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
        return lim;
}

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
        return 1;
}

enum {
        LDLM_POOL_FIRST_STAT = 0,
        LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
        LDLM_POOL_GRANT_STAT,
        LDLM_POOL_CANCEL_STAT,
        LDLM_POOL_GRANT_RATE_STAT,
        LDLM_POOL_CANCEL_RATE_STAT,
        LDLM_POOL_GRANT_PLAN_STAT,
        LDLM_POOL_SLV_STAT,
        LDLM_POOL_SHRINK_REQTD_STAT,
        LDLM_POOL_SHRINK_FREED_STAT,
        LDLM_POOL_RECALC_STAT,
        LDLM_POOL_TIMING_STAT,
        LDLM_POOL_LAST_STAT
};

static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
        return container_of(pl, struct ldlm_namespace, ns_pool);
}

/**
 * Calculates the suggested grant_step in % of available locks for the
 * passed \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
        /*
         * This yields a 1% grant step for anything below
         * 1 << LDLM_POOL_GSP_STEP_SHIFT seconds and up to 30% for longer
         * periods.
         *
         * The effect on execution is the following:
         *
         * - for a thread period of 1s the grant_step is 1%, which is good
         *   from the point of view of taking some load off the server and
         *   pushing it out to clients. With a 1% grant_step the server does
         *   not allow clients to grab lots of locks in a short period of
         *   time while keeping all their old locks cached. Clients always
         *   have to give some locks back if they want to take new ones;
         *
         * - for a thread period of 10s (the default) the grant_step is 23%,
         *   which means clients have enough room to take new locks without
         *   giving any back. All locks from this 23% that are not taken by
         *   clients in the current period contribute to SLV growth. A
         *   growing SLV means more locks cached on clients, until the limit
         *   or the grant plan is reached.
         */
        return LDLM_POOL_MAX_GSP -
                ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
                 (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
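
/*
 * Illustrative values, derived from the macros above (not additional
 * tunables):
 *   t = 1s   -> 30 - (29 >> 0) = 1%
 *   t = 10s  -> 30 - (29 >> 2) = 23%
 *   t >= 20s -> 30 - (29 >> 5) = 30% (the LDLM_POOL_MAX_GSP cap).
 */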

/**
 * Recalculates next grant limit on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
        int granted, grant_step, limit;

        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);

        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        grant_step = ((limit - granted) * grant_step) / 100;
        pl->pl_grant_plan = granted + grant_step;
        limit = (limit * 5) >> 2;
        if (pl->pl_grant_plan > limit)
                pl->pl_grant_plan = limit;
}
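
/*
 * A rough worked example (the numbers are made up for illustration): with
 * limit = 10000, granted = 8000 and a 10s recalc period (grant_step = 23%),
 * grant_step becomes ((10000 - 8000) * 23) / 100 = 460, so the grant plan
 * is 8000 + 460 = 8460. The plan is additionally capped at
 * limit * 5 / 4 = 12500.
 */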

/**
 * Recalculates next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
        int granted;
        int grant_plan;
        int round_up;
        __u64 slv;
        __u64 slv_factor;
        __u64 grant_usage;
        __u32 limit;

        slv = pl->pl_server_lock_volume;
        grant_plan = pl->pl_grant_plan;
        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);
        round_up = granted < limit;

        grant_usage = max_t(int, limit - (granted - grant_plan), 1);

        /*
         * Find out the SLV change factor, which is the ratio of grant usage
         * to the limit. The SLV changes as fast as the ratio of grant plan
         * consumption: the more of the grant plan was left unconsumed by
         * clients in the last interval (idle time), the faster the SLV
         * grows. Conversely, the more the grant plan is over-consumed (load
         * time), the faster the SLV drops.
         */
        slv_factor = grant_usage << LDLM_POOL_SLV_SHIFT;
        do_div(slv_factor, limit);
        slv = slv * slv_factor;
        slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

        if (slv > ldlm_pool_slv_max(limit))
                slv = ldlm_pool_slv_max(limit);
        else if (slv < ldlm_pool_slv_min(limit))
                slv = ldlm_pool_slv_min(limit);

        pl->pl_server_lock_volume = slv;
}
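
/*
 * A rough worked example (illustrative numbers only): with limit = 10000,
 * grant_plan = 8460 and granted = 9000, grant_usage = 10000 - (9000 - 8460)
 * = 9460, so slv_factor is about 0.946 and the SLV shrinks by roughly 5%
 * this period. Had clients consumed less than the plan (say granted =
 * 8000), grant_usage would be 10460 and the SLV would instead grow by
 * roughly 5%.
 */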

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
        int grant_plan = pl->pl_grant_plan;
        __u64 slv = pl->pl_server_lock_volume;
        int granted = atomic_read(&pl->pl_granted);
        int grant_rate = atomic_read(&pl->pl_grant_rate);
        int cancel_rate = atomic_read(&pl->pl_cancel_rate);

        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                            slv);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                            granted);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                            grant_rate);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                            grant_plan);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                            cancel_rate);
}

/**
 * Sets the current SLV into the obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Set the new SLV in the obd field so it can be used later without
         * accessing the pool. This is required to avoid a race between
         * sending a reply (with the new SLV) to a client and cleaning up
         * the server stack, during which we cannot guarantee that the
         * namespace is still alive. We only know that the obd is alive as
         * long as a valid export is alive.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_slv = pl->pl_server_lock_volume;
        write_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;

        recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }
        /*
         * Recalculate the SLV for the last period. This must be done
         * _before_ recalculating the new grant plan.
         */
        ldlm_pool_recalc_slv(pl);

        /*
         * Make sure the pool has informed the obd of the latest SLV change.
         */
        ldlm_srv_pool_push_slv(pl);

        /*
         * Update the grant_plan for the new period.
         */
        ldlm_pool_recalc_grant_plan(pl);

        pl->pl_recalc_time = get_seconds();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return 0;
}

/**
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases the SLV on \a pl according to the
 * passed \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease the SLV so that clients hold approximately
 * \a nr fewer locks over the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
                                int nr, gfp_t gfp_mask)
{
        __u32 limit;

        /*
         * The VM is asking how many entries may potentially be freed.
         */
        if (nr == 0)
                return atomic_read(&pl->pl_granted);

        /*
         * The clients have already canceled all their locks, so the server
         * has nothing left to cancel while it is in the shrinker. Catch
         * this race.
         */
        if (atomic_read(&pl->pl_granted) == 0)
                return 0;

        spin_lock(&pl->pl_lock);

        /*
         * We want the shrinker either to cause cancellation of @nr locks on
         * clients or to make the server grant approximately @nr fewer locks
         * in the next intervals.
         *
         * This is why we decrease the SLV by @nr. The effect only lasts for
         * one recalc interval (1s these days), which should be enough to
         * propagate the decreased SLV to all clients. On the next recalc
         * interval the pool will either increase the SLV if the locking
         * load is not high, keep it at the same level, or decrease it
         * again; this way the shrinker-decreased SLV affects the following
         * recalc intervals and lowers the locking load.
         */
        if (nr < pl->pl_server_lock_volume) {
                pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
        } else {
                limit = ldlm_pool_get_limit(pl);
                pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
        }

        /*
         * Make sure the pool has informed the obd of the latest SLV change.
         */
        ldlm_srv_pool_push_slv(pl);
        spin_unlock(&pl->pl_lock);

        /*
         * We have not actually freed any memory here; it may only be freed
         * later, so return 0 to avoid confusing the VM.
         */
        return 0;
}
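
/*
 * For illustration: a request to scan nr = 1000 objects lowers the SLV by
 * 1000 for roughly one recalc interval; clients whose CLV exceeds the
 * lowered SLV then cancel locks on their side, while this call itself
 * reports 0 because nothing is freed synchronously here.
 */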

/**
 * Setup server side pool \a pl with passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
        struct obd_device *obd;

        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL && obd != LP_POISON);
        LASSERT(obd->obd_type != LP_POISON);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_limit = limit;
        write_unlock(&obd->obd_pool_lock);

        ldlm_pool_set_limit(pl, limit);
        return 0;
}

/**
 * Sets the SLV and Limit from ldlm_pl2ns(pl)->ns_obd into the passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Get the new SLV and Limit from the obd, which is updated by
         * incoming RPCs.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        read_lock(&obd->obd_pool_lock);
        pl->pl_server_lock_volume = obd->obd_pool_slv;
        ldlm_pool_set_limit(pl, obd->obd_pool_limit);
        read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates the client side pool \a pl according to the current SLV and
 * Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int ret;

        recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        /*
         * Check if we need to recalc lists now.
         */
        recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Make sure the pool knows the latest SLV and Limit from the obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_unlock(&pl->pl_lock);

        /*
         * Do not cancel locks if LRU resize is disabled for this namespace.
         */
        if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) {
                ret = 0;
                goto out;
        }

        /*
         * While canceling locks on the client we do not need to maintain
         * sharp timing; we only want to cancel locks as soon as possible
         * according to the new SLV. This may be called when the SLV has
         * changed a lot, which is why pl->pl_recalc_time is not taken into
         * account here.
         */
        ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);

out:
        spin_lock(&pl->pl_lock);
        /*
         * LRU resizing may take longer than one period, so update the
         * recalc time after the LRU resize rather than before it.
         */
        pl->pl_recalc_time = get_seconds();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return ret;
}

/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                int nr, gfp_t gfp_mask)
{
        struct ldlm_namespace *ns;
        int unused;

        ns = ldlm_pl2ns(pl);

        /*
         * Do not cancel locks if LRU resize is disabled for this namespace.
         */
        if (!ns_connect_lru_resize(ns))
                return 0;

        /*
         * Make sure the pool knows the latest SLV and Limit from the obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_lock(&ns->ns_lock);
        unused = ns->ns_nr_unused;
        spin_unlock(&ns->ns_lock);

        if (nr == 0)
                return (unused / 100) * sysctl_vfs_cache_pressure;
        else
                return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
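
/*
 * Note: with the kernel default of sysctl_vfs_cache_pressure == 100, the
 * nr == 0 ("count") case above reports approximately the number of unused
 * locks in the LRU; raising vfs_cache_pressure makes the pool look
 * proportionally bigger to the shrinker.
 */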

static const struct ldlm_pool_ops ldlm_srv_pool_ops = {
        .po_recalc = ldlm_srv_pool_recalc,
        .po_shrink = ldlm_srv_pool_shrink,
        .po_setup  = ldlm_srv_pool_setup
};

static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
        .po_recalc = ldlm_cli_pool_recalc,
        .po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Calls either the client or the server pool recalc
 * callback, depending on what kind of pool \a pl is.
 */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int count;

        recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec <= 0)
                goto recalc;

        spin_lock(&pl->pl_lock);
        if (recalc_interval_sec > 0) {
                /*
                 * Update pool statistics every 1s.
                 */
                ldlm_pool_recalc_stats(pl);

                /*
                 * Zero out all rates and speed for the last period.
                 */
                atomic_set(&pl->pl_grant_rate, 0);
                atomic_set(&pl->pl_cancel_rate, 0);
        }
        spin_unlock(&pl->pl_lock);

recalc:
        if (pl->pl_ops->po_recalc != NULL) {
                count = pl->pl_ops->po_recalc(pl);
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }
        recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
                              pl->pl_recalc_period;
        if (recalc_interval_sec <= 0) {
                /* Prevent too frequent recalculation. */
                CDEBUG(D_DLMTRACE, "Negative interval(%ld), too short period(%ld)",
                       recalc_interval_sec,
                       pl->pl_recalc_period);
                recalc_interval_sec = 1;
        }

        return recalc_interval_sec;
}
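
/*
 * The value returned above is the number of seconds until the next recalc
 * is due (at least 1). For example, with pl_recalc_period = 10 and 3
 * seconds elapsed since pl_recalc_time, the callback returns early and this
 * function returns 7, which ldlm_pools_thread_main() uses as its sleep
 * time.
 */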

/*
 * Pool shrink wrapper. Calls either the client or the server pool shrink
 * callback, depending on what kind of pool pl is. When nr == 0, just return
 * the number of freeable locks. Otherwise, return the number of canceled
 * locks.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     gfp_t gfp_mask)
{
        int cancel = 0;

        if (pl->pl_ops->po_shrink != NULL) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_REQTD_STAT,
                                            nr);
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_FREED_STAT,
                                            cancel);
                        CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
                               pl->pl_name, nr, cancel);
                }
        }
        return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);

/**
 * Pool setup wrapper. Calls either the client or the server pool setup
 * callback, depending on what kind of pool \a pl is.
 *
 * Sets the passed \a limit into the pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        if (pl->pl_ops->po_setup != NULL)
                return pl->pl_ops->po_setup(pl, limit);
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);

#if defined(CONFIG_PROC_FS)
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
        int granted, grant_rate, cancel_rate, grant_step;
        int grant_speed, grant_plan, lvf;
        struct ldlm_pool *pl = m->private;
        __u64 slv, clv;
        __u32 limit;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        clv = pl->pl_client_lock_volume;
        limit = ldlm_pool_get_limit(pl);
        grant_plan = pl->pl_grant_plan;
        granted = atomic_read(&pl->pl_granted);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        grant_speed = grant_rate - cancel_rate;
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        spin_unlock(&pl->pl_lock);

        seq_printf(m, "LDLM pool state (%s):\n"
                      "  SLV: %llu\n"
                      "  CLV: %llu\n"
                      "  LVF: %d\n",
                   pl->pl_name, slv, clv, lvf);

        if (ns_is_server(ldlm_pl2ns(pl))) {
                seq_printf(m, "  GSP: %d%%\n"
                              "  GP:  %d\n",
                           grant_step, grant_plan);
        }
        seq_printf(m, "  GR:  %d\n  CR:  %d\n  GS:  %d\n"
                      "  G:   %d\n  L:   %d\n",
                   grant_rate, cancel_rate, grant_speed,
                   granted, limit);

        return 0;
}
LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
{
        struct ldlm_pool *pl = m->private;
        int grant_speed;

        spin_lock(&pl->pl_lock);
        /* serialize with ldlm_pool_recalc */
        grant_speed = atomic_read(&pl->pl_grant_rate) -
                      atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
        return lprocfs_rd_uint(m, &grant_speed);
}

LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);

LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
                                               const char __user *buf,
                                               size_t len, loff_t *off)
{
        struct seq_file *seq = file->private_data;

        return lprocfs_wr_recalc_period(file, buf, len, seq->private);
}
LPROC_SEQ_FOPS(lprocfs_recalc_period);

LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);

LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);

#define LDLM_POOL_ADD_VAR(name, var, ops)                       \
do {                                                            \
        snprintf(var_name, MAX_STRING_SIZE, #name);             \
        pool_vars[0].data = var;                                \
        pool_vars[0].fops = ops;                                \
        lprocfs_add_vars(pl->pl_proc_dir, pool_vars, NULL);     \
} while (0)

static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        struct proc_dir_entry *parent_ns_proc;
        struct lprocfs_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;

        OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
        if (!var_name)
                return -ENOMEM;

        parent_ns_proc = ns->ns_proc_dir_entry;
        if (parent_ns_proc == NULL) {
                CERROR("%s: proc entry is not initialized\n",
                       ldlm_ns_name(ns));
                rc = -EINVAL;
                goto out_free_name;
        }
        pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
                                           NULL, NULL);
        if (IS_ERR(pl->pl_proc_dir)) {
                CERROR("LProcFS failed in ldlm-pool-init\n");
                rc = PTR_ERR(pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
                goto out_free_name;
        }

        var_name[MAX_STRING_SIZE] = '\0';
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;

        LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume,
                          &ldlm_pool_u64_fops);
        LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops);
        LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops);
        LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops);
        LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate,
                          &ldlm_pool_atomic_fops);
        LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate,
                          &ldlm_pool_atomic_fops);
        LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops);
        LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops);
        LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor,
                          &ldlm_pool_rw_atomic_fops);
        LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops);

        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
        if (!pl->pl_stats) {
                rc = -ENOMEM;
                goto out_free_name;
        }

        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "granted", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_plan", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "slv", "slv");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_request", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
        rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);

out_free_name:
        OBD_FREE(var_name, MAX_STRING_SIZE + 1);
        return rc;
}

static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
        if (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
        if (pl->pl_proc_dir != NULL) {
                lprocfs_remove(&pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
        }
}
#else /* !CONFIG_PROC_FS */
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
        return 0;
}

static void ldlm_pool_proc_fini(struct ldlm_pool *pl) {}
#endif /* CONFIG_PROC_FS */

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
{
        int rc;

        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
        pl->pl_recalc_time = get_seconds();
        atomic_set(&pl->pl_lock_volume_factor, 1);

        atomic_set(&pl->pl_grant_rate, 0);
        atomic_set(&pl->pl_cancel_rate, 0);
        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ldlm_ns_name(ns), idx);

        if (client == LDLM_NAMESPACE_SERVER) {
                pl->pl_ops = &ldlm_srv_pool_ops;
                ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
                pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
                pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
        } else {
                ldlm_pool_set_limit(pl, 1);
                pl->pl_server_lock_volume = 0;
                pl->pl_ops = &ldlm_cli_pool_ops;
                pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
        }
        pl->pl_client_lock_volume = 0;
        rc = ldlm_pool_proc_init(pl);
        if (rc)
                return rc;

        CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

        return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
        ldlm_pool_proc_fini(pl);

        /*
         * The pool should not be used after this point. We can't free it
         * here as it lives in struct ldlm_namespace, but we are still
         * interested in catching any abnormal use.
         */
        POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * FLOCK locks are special in that they are almost never canceled;
         * instead, a special kind of lock is used to drop them. There is
         * also no LRU for flock locks, so there is no point in tracking
         * them anyway.
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        atomic_inc(&pl->pl_granted);
        atomic_inc(&pl->pl_grant_rate);
        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
        /*
         * Do not do pool recalc for the client side, as all locks which
         * potentially may be canceled have already been packed into the
         * enqueue/cancel RPC. Also, we do not want to run out of stack
         * with too long call paths.
         */
        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_add);

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * Filter out FLOCK locks. See the comment in ldlm_pool_add().
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        LASSERT(atomic_read(&pl->pl_granted) > 0);
        atomic_dec(&pl->pl_granted);
        atomic_inc(&pl->pl_cancel_rate);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_del);

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);

/**
 * Sets passed \a slv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_server_lock_volume = slv;
        spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);

/**
 * Returns current \a pl CLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_client_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_client_lock_volume = clv;
        spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);

/**
 * Returns current \a pl limit.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);

/**
 * Sets passed \a limit to \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_lock_volume_factor);
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);

static int ldlm_pool_granted(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct completion ldlm_pools_comp;

/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
        int total = 0, nr_ns;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL; /* loop detection */
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return 0;

        CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
               client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        cookie = cl_env_reenter();

        /*
         * Find out how many resources we may release.
         */
        for (nr_ns = ldlm_namespace_nr_read(client);
             nr_ns > 0; nr_ns--) {
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        cl_env_reexit(cookie);
                        return 0;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns == ns_old) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));
                total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                ldlm_namespace_put(ns);
        }

        cl_env_reexit(cookie);
        return total;
}

static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
{
        unsigned long freed = 0;
        int tmp, nr_ns;
        struct ldlm_namespace *ns;
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return -1;

        cookie = cl_env_reenter();

        /*
         * Shrink at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (tmp = nr_ns = ldlm_namespace_nr_read(client);
             tmp > 0; tmp--) {
                int cancel, nr_locks;

                /*
                 * Do not call shrink under ldlm_namespace_lock(client)
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);
                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                nr_locks = ldlm_pool_granted(&ns->ns_pool);
                /*
                 * We used to shrink proportionally, but with the new
                 * shrinker API we lost the total number of freeable locks.
                 */
                cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
                freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                ldlm_namespace_put(ns);
        }
        cl_env_reexit(cookie);
        /*
         * We only decrease the SLV in the server pools' shrinker; return
         * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128
         */
        return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
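
/*
 * For illustration: if the shrinker asks to scan nr = 128 objects and there
 * are 4 namespaces on this side, each pool is asked to cancel
 * 1 + min(its granted count, 128 / 4 = 32) locks, i.e. roughly 33 per
 * namespace when they are all busy.
 */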

static unsigned long ldlm_pools_srv_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}

static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
                               sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
                               sc->gfp_mask);
}

int ldlm_pools_recalc(ldlm_side_t client)
{
        __u32 nr_l = 0, nr_p = 0, l;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL;
        int nr, equal = 0;
        int time = 50; /* seconds of sleep if no active namespaces */

        /*
         * No need to set up the pool limit for client pools.
         */
        if (client == LDLM_NAMESPACE_SERVER) {
                /*
                 * Check all modest namespaces first.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                list_for_each_entry(ns, ldlm_namespace_list(client),
                                    ns_list_chain) {
                        if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                continue;

                        l = ldlm_pool_granted(&ns->ns_pool);
                        if (l == 0)
                                l = 1;

                        /*
                         * Set the modest pools' limit equal to their avg
                         * granted locks + ~6%.
                         */
                        l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
                        ldlm_pool_setup(&ns->ns_pool, l);
                        nr_l += l;
                        nr_p++;
                }

                /*
                 * Make sure that modest namespaces did not eat more than
                 * 2/3 of the limit.
                 */
                if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
                        CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
                              nr_l, LDLM_POOL_HOST_L);
                        equal = 1;
                }

                /*
                 * The rest is given to greedy namespaces.
                 */
                list_for_each_entry(ns, ldlm_namespace_list(client),
                                    ns_list_chain) {
                        if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                continue;

                        if (equal) {
                                /*
                                 * In case 2/3 of the locks are eaten by
                                 * modest pools, we re-setup an equal limit
                                 * for _all_ pools.
                                 */
                                l = LDLM_POOL_HOST_L /
                                    ldlm_namespace_nr_read(client);
                        } else {
                                /*
                                 * All the remaining greedy pools share the
                                 * rest of the locks in equal parts.
                                 */
                                l = (LDLM_POOL_HOST_L - nr_l) /
                                    (ldlm_namespace_nr_read(client) -
                                     nr_p);
                        }
                        ldlm_pool_setup(&ns->ns_pool, l);
                }
                mutex_unlock(ldlm_namespace_lock(client));
        }

        /*
         * Recalc at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
                int skip;
                /*
                 * Lock the list, get the first @ns in the list, take a
                 * reference, move it to the tail, unlock and call pool
                 * recalc. This way we avoid calling recalc under the @ns
                 * lock, which is really good as we get rid of a potential
                 * deadlock on client nodes when canceling locks
                 * synchronously.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns_old == ns) { /* Full pass complete */
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                /* We got an empty namespace, need to move it back to the
                 * inactive list.
                 * The race with parallel resource creation is fine:
                 * - If they do namespace_get before our check, we fail the
                 *   check and they move this item to the end of the list
                 *   anyway
                 * - If we do the check and then they do namespace_get, then
                 *   we move the namespace to inactive and they will move
                 *   it back to active (synchronised by the lock, so no clash
                 *   there).
                 */
                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                spin_lock(&ns->ns_lock);
                /*
                 * Skip an ns which is being freed; we don't want to increase
                 * its refcount again, not even temporarily. bz21519 & LU-499.
                 */
                if (ns->ns_stopping) {
                        skip = 1;
                } else {
                        skip = 0;
                        ldlm_namespace_get(ns);
                }
                spin_unlock(&ns->ns_lock);

                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                /*
                 * After setup is done - recalc the pool.
                 */
                if (!skip) {
                        int ttime = ldlm_pool_recalc(&ns->ns_pool);

                        if (ttime < time)
                                time = ttime;

                        ldlm_namespace_put(ns);
                }
        }
        return time;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
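
/*
 * A rough illustration of the server-side limit distribution above
 * (hypothetical numbers): with LDLM_POOL_HOST_L = 300000 and two "modest"
 * namespaces holding 10000 and 20000 granted locks, those pools get limits
 * of roughly 10600 and 21200 (granted + ~6%), and the remaining ~268000
 * locks are split equally among the "greedy" namespaces; if modest pools
 * ever claim more than 2/3 of LDLM_POOL_HOST_L, every pool instead gets an
 * equal share.
 */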

static int ldlm_pools_thread_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
        int s_time, c_time;

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
               "ldlm_poold", current_pid());

        while (1) {
                struct l_wait_info lwi;

                /*
                 * Recalc all pools on this tick.
                 */
                s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
                c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

                /*
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
                lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_event(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
                else
                        thread_test_and_clear_flags(thread, SVC_EVENT);
        }

        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
               "ldlm_poold", current_pid());

        complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;

        if (ldlm_pools_thread != NULL)
                return -EALREADY;

        OBD_ALLOC_PTR(ldlm_pools_thread);
        if (ldlm_pools_thread == NULL)
                return -ENOMEM;

        init_completion(&ldlm_pools_comp);
        init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
        if (IS_ERR(task)) {
                CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
                OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
                ldlm_pools_thread = NULL;
                return PTR_ERR(task);
        }
        l_wait_event(ldlm_pools_thread->t_ctl_waitq,
                     thread_is_running(ldlm_pools_thread), &lwi);
        return 0;
}

static void ldlm_pools_thread_stop(void)
{
        if (ldlm_pools_thread == NULL)
                return;

        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
        wake_up(&ldlm_pools_thread->t_ctl_waitq);

        /*
         * Make sure that the pools thread is finished before freeing
         * @thread. This fixes a possible race and oops caused by accessing
         * freed memory in the pools thread.
         */
        wait_for_completion(&ldlm_pools_comp);
        OBD_FREE_PTR(ldlm_pools_thread);
        ldlm_pools_thread = NULL;
}

static struct shrinker ldlm_pools_srv_shrinker = {
        .count_objects = ldlm_pools_srv_count,
        .scan_objects  = ldlm_pools_srv_scan,
        .seeks         = DEFAULT_SEEKS,
};

static struct shrinker ldlm_pools_cli_shrinker = {
        .count_objects = ldlm_pools_cli_count,
        .scan_objects  = ldlm_pools_cli_scan,
        .seeks         = DEFAULT_SEEKS,
};

int ldlm_pools_init(void)
{
        int rc;

        rc = ldlm_pools_thread_start();
        if (rc == 0) {
                register_shrinker(&ldlm_pools_srv_shrinker);
                register_shrinker(&ldlm_pools_cli_shrinker);
        }
        return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
        unregister_shrinker(&ldlm_pools_srv_shrinker);
        unregister_shrinker(&ldlm_pools_cli_shrinker);
        ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);