/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/recover.c
 *
 * Author: Mike Shaver <shaver@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_RPC
#include "../../include/linux/libcfs/libcfs.h"

#include "../include/obd_support.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_net.h"
#include "../include/lustre_import.h"
#include "../include/lustre_export.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include <linux/list.h>

#include "ptlrpc_internal.h"

/**
 * Start recovery on disconnected import.
 * This is done by just attempting a connect
 */
void ptlrpc_initiate_recovery(struct obd_import *imp)
{
	CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
	ptlrpc_connect_import(imp);
}

/**
 * Identify which request from the replay list needs to be replayed next
 * (based on what we have already replayed) and send it to the server.
 *
 * Sets \a inflight to 1 if a replay request was sent, 0 otherwise.
 */
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
	int rc = 0;
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req = NULL;
	__u64 last_transno;

	*inflight = 0;

	/* The server might have committed some requests since we last spoke,
	 * so make sure we get rid of them now.
	 */
	spin_lock(&imp->imp_lock);
	imp->imp_last_transno_checked = 0;
	ptlrpc_free_committed(imp);
	last_transno = imp->imp_last_replay_transno;
	spin_unlock(&imp->imp_lock);

	CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
	       imp, obd2cli_tgt(imp->imp_obd),
	       imp->imp_peer_committed_transno, last_transno);

	/* Do I need to hold a lock across this iteration?  We shouldn't be
	 * racing with any additions to the list, because we're in recovery
	 * and are therefore not processing additional requests to add.  Calls
	 * to ptlrpc_free_committed might commit requests, but nothing "newer"
	 * than the one we're replaying (it can't be committed until it's
	 * replayed, and we're doing that here).  l_f_e_safe protects against
	 * problems with the current request being committed, in the unlikely
	 * event of that race.  So, in conclusion, I think that it's safe to
	 * perform this list-walk without the imp_lock held.
	 *
	 * But, the {mdc,osc}_replay_open callbacks both iterate
	 * request lists, and have comments saying they assume the
	 * imp_lock is being held by ptlrpc_replay, but it's not.  It's
	 * just a little race...
	 */

	/* Replay all the committed open requests on committed_list first */
	if (!list_empty(&imp->imp_committed_list)) {
		tmp = imp->imp_committed_list.prev;
		req = list_entry(tmp, struct ptlrpc_request,
				     rq_replay_list);

		/* The last request on committed_list hasn't been replayed */
		if (req->rq_transno > last_transno) {
			/* Since the imp_committed_list is immutable until
			 * all of its requests have been replayed, it's safe to
			 * use a cursor to accelerate the search */
			imp->imp_replay_cursor = imp->imp_replay_cursor->next;

			while (imp->imp_replay_cursor !=
			       &imp->imp_committed_list) {
				req = list_entry(imp->imp_replay_cursor,
						 struct ptlrpc_request,
						 rq_replay_list);
				if (req->rq_transno > last_transno)
					break;

				req = NULL;
				imp->imp_replay_cursor =
					imp->imp_replay_cursor->next;
			}
		} else {
			/* All requests on committed_list have been replayed */
			imp->imp_replay_cursor = &imp->imp_committed_list;
			req = NULL;
		}
	}

	/* All the requests in the committed list have been replayed; now
	 * replay the imp_replay_list */
	if (req == NULL) {
		list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
			req = list_entry(tmp, struct ptlrpc_request,
					 rq_replay_list);

			if (req->rq_transno > last_transno)
				break;
			req = NULL;
		}
	}

	/* If we need to resend the last sent transno (because a reconnect
	 * has occurred), then stop on the matching req and send it again.
	 * If, however, the last sent transno has been committed, then we
	 * continue replay from the next request. */
	if (req != NULL && imp->imp_resend_replay)
		lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

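	/* imp_resend_replay is only honoured once; clear it under imp_lock
	 * now that it has been checked above. */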
	spin_lock(&imp->imp_lock);
	imp->imp_resend_replay = 0;
	spin_unlock(&imp->imp_lock);

	if (req != NULL) {
		rc = ptlrpc_replay_req(req);
		if (rc) {
			CERROR("recovery replay error %d for req %llu\n",
			       rc, req->rq_xid);
			return rc;
		}
		*inflight = 1;
	}
	return rc;
}

/**
 * Schedule resending of requests on the sending_list. This is done after
 * we have completed replaying requests and locks.
 */
int ptlrpc_resend(struct obd_import *imp)
{
	struct ptlrpc_request *req, *next;

	/* As long as we're in recovery, nothing should be added to the sending
	 * list, so we don't need to hold the lock during this iteration and
	 * resend process.
	 */
	/* Well... what if lctl recover is called twice at the same time?
	 */
	spin_lock(&imp->imp_lock);
	if (imp->imp_state != LUSTRE_IMP_RECOVER) {
		spin_unlock(&imp->imp_lock);
		return -1;
	}

	list_for_each_entry_safe(req, next, &imp->imp_sending_list,
				 rq_list) {
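		/* Sanity checks: make sure the request pointer looks valid
		 * and its memory has not already been freed and poisoned. */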
		LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
			 "req %p bad\n", req);
		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
		if (!ptlrpc_no_resend(req))
			ptlrpc_resend_req(req);
	}
	spin_unlock(&imp->imp_lock);

	return 0;
}
EXPORT_SYMBOL(ptlrpc_resend);

/**
 * Go through all requests in delayed list and wake their threads
 * for resending
 */
void ptlrpc_wake_delayed(struct obd_import *imp)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;

	spin_lock(&imp->imp_lock);
	list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
		req = list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_wake_delayed);

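/**
 * Handle a request that failed because its connection to the server was
 * lost: mark the import of \a failed_req as disconnected, kick off
 * reconnection (unless the import has been administratively deactivated),
 * and flag the request for resend once recovery completes.
 */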
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
	struct obd_import *imp = failed_req->rq_import;

	CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
	       imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
	       imp->imp_connection->c_remote_uuid.uuid);

	if (ptlrpc_set_import_discon(imp,
			      lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
		if (!imp->imp_replayable) {
			CDEBUG(D_HA, "import %s@%s for %s not replayable, auto-deactivating\n",
			       obd2cli_tgt(imp->imp_obd),
			       imp->imp_connection->c_remote_uuid.uuid,
			       imp->imp_obd->obd_name);
			ptlrpc_deactivate_import(imp);
		}
		/* to control recovery via lctl {disable|enable}_recovery */
		if (imp->imp_deactive == 0)
			ptlrpc_connect_import(imp);
	}

	/* Wait for recovery to complete and resend. If evicted, then
	 * this request will be errored out later. */
	spin_lock(&failed_req->rq_lock);
	if (!failed_req->rq_no_resend)
		failed_req->rq_resend = 1;
	spin_unlock(&failed_req->rq_lock);
}

/**
 * Administratively activate/deactivate a client.
 * This should only be called by the ioctl interface, currently
 *  - the lctl deactivate and activate commands
 *  - echo 0/1 >> /proc/osc/XXX/active
 *  - client umount -f (ll_umount_begin)
 */
int ptlrpc_set_import_active(struct obd_import *imp, int active)
{
	struct obd_device *obd = imp->imp_obd;
	int rc = 0;

	LASSERT(obd);

	/* When deactivating, mark import invalid, and abort in-flight
	 * requests. */
	if (!active) {
		LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
			      obd2cli_tgt(imp->imp_obd));

		/* set before invalidate to avoid messages about imp_inval
		 * set without imp_deactive in ptlrpc_import_delay_req */
		spin_lock(&imp->imp_lock);
		imp->imp_deactive = 1;
		spin_unlock(&imp->imp_lock);

		obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);

		ptlrpc_invalidate_import(imp);
	}

	/* When activating, mark import valid, and attempt recovery */
	if (active) {
		CDEBUG(D_HA, "setting import %s VALID\n",
		       obd2cli_tgt(imp->imp_obd));

		spin_lock(&imp->imp_lock);
		imp->imp_deactive = 0;
		spin_unlock(&imp->imp_lock);
		obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);

		rc = ptlrpc_recover_import(imp, NULL, 0);
	}

	return rc;
}
EXPORT_SYMBOL(ptlrpc_set_import_active);

/**
 * Attempt to reconnect an import.
 *
 * If \a new_uuid is given, make the import prefer that connection first.
 * Unless \a async is set, wait up to obd_timeout for recovery to finish
 * before returning.
 */
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
{
	int rc = 0;

	spin_lock(&imp->imp_lock);
	if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
	    atomic_read(&imp->imp_inval_count))
		rc = -EINVAL;
	spin_unlock(&imp->imp_lock);
	if (rc)
		goto out;

	/* force import to be disconnected. */
	ptlrpc_set_import_discon(imp, 0);

	if (new_uuid) {
		struct obd_uuid uuid;

		/* instruct import to use new uuid */
		obd_str2uuid(&uuid, new_uuid);
		rc = import_set_conn_priority(imp, &uuid);
		if (rc)
			goto out;
	}

	/* Check if reconnect is already in progress */
	spin_lock(&imp->imp_lock);
	if (imp->imp_state != LUSTRE_IMP_DISCON) {
		imp->imp_force_verify = 1;
		rc = -EALREADY;
	}
	spin_unlock(&imp->imp_lock);
	if (rc)
		goto out;

	rc = ptlrpc_connect_import(imp);
	if (rc)
		goto out;

	if (!async) {
		struct l_wait_info lwi;
		int secs = cfs_time_seconds(obd_timeout);

		CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
		       obd2cli_tgt(imp->imp_obd), secs);

		lwi = LWI_TIMEOUT(secs, NULL, NULL);
		rc = l_wait_event(imp->imp_recovery_waitq,
				  !ptlrpc_import_in_recovery(imp), &lwi);
		CDEBUG(D_HA, "%s: recovery finished\n",
		       obd2cli_tgt(imp->imp_obd));
	}

out:
	return rc;
}
EXPORT_SYMBOL(ptlrpc_recover_import);

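/**
 * Check whether \a imp is still in recovery.
 *
 * Recovery is considered finished once the import is fully connected,
 * closed, or back in the disconnected state, or when recovery has been
 * disabled on the underlying obd device.
 */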
int ptlrpc_import_in_recovery(struct obd_import *imp)
{
	int in_recovery = 1;

	spin_lock(&imp->imp_lock);
	if (imp->imp_state == LUSTRE_IMP_FULL ||
	    imp->imp_state == LUSTRE_IMP_CLOSED ||
	    imp->imp_state == LUSTRE_IMP_DISCON ||
	    imp->imp_obd->obd_no_recov)
		in_recovery = 0;
	spin_unlock(&imp->imp_lock);

	return in_recovery;
}