/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"

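/*
 * Security instance for the plain policy: a single client context is
 * created lazily, cached in pls_ctx and shared by all users, protected
 * by pls_lock.
 */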
struct plain_sec {
	struct ptlrpc_sec	 pls_base;
	rwlock_t		 pls_lock;
	struct ptlrpc_cli_ctx	*pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed buffer layout.
 */
#define PLAIN_PACK_SEGMENTS	(4)

#define PLAIN_PACK_HDR_OFF	(0)
#define PLAIN_PACK_MSG_OFF	(1)
#define PLAIN_PACK_USER_OFF	(2)
#define PLAIN_PACK_BULK_OFF	(3)

#define PLAIN_FL_USER		(0x01)
#define PLAIN_FL_BULK		(0x02)

struct plain_header {
	__u8	ph_ver;		  /* 0 */
	__u8	ph_flags;
	__u8	ph_sp;		  /* source */
	__u8	ph_bulk_hash_alg; /* complete flavor desc */
	__u8	ph_pad[4];
};

struct plain_bulk_token {
	__u8	pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers		*
 ****************************************/

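/*
 * Unpack and sanity-check the bulk security descriptor carried in the
 * PLAIN_PACK_BULK_OFF segment; only the NULL and integrity bulk services
 * are valid for the plain policy.
 */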
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (bsd == NULL) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}

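/* Compute the bulk data checksum into @token, unless the null hash
 * algorithm is in use. */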
static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}

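/* Recompute the bulk checksum and compare it against the one received
 * in @tokenr; returns -EACCES on mismatch. */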
static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
				  __u8 hash_alg,
				  struct plain_bulk_token *tokenr)
{
	struct plain_bulk_token tokenv;
	int rc;

	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
	rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
				       sizeof(tokenv.pbt_hash));
	if (rc)
		return rc;

	if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
		return -EACCES;
	return 0;
}

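/* Fault-injection helper: flip one bit in the first non-empty page of
 * the bulk so the peer's checksum verification is guaranteed to fail. */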
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char *ptr;
	unsigned int off, i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len == 0)
			continue;

		ptr = kmap(desc->bd_iov[i].kiov_page);
		off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
		ptr[off] ^= 0x1;
		kunmap(desc->bd_iov[i].kiov_page);
		return;
	}
}

/****************************************
 * cli_ctx apis				*
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}

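/*
 * "Sign" an outgoing request. Plain security adds no cryptographic
 * protection; this only fills in the plain_header describing the flavor,
 * the source part and which optional segments are packed.
 */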
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	return 0;
}

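/*
 * Verify an incoming reply: check the fixed segment layout and the plain
 * header, verify the CRC32 checksum of early replies, and unpack the bulk
 * descriptor if bulk was negotiated.
 */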
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32 cksum;
	int swabbed;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		return -EPROTO;
	}

	if (unlikely(req->rq_early)) {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				NULL, 0, (unsigned char *)&cksum, &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			return -EINVAL;
		}
	} else {
		/* Whether or not we sent with bulk, we expect the same
		 * in the reply, except for early replies. */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			return -EPROTO;
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				return -EPROTO;
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	return 0;
}

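/*
 * Fill in the bulk security descriptor of a client request. The checksum
 * is computed here only for bulk writes; for bulk reads the server
 * generates it and plain_cli_unwrap_bulk() verifies it.
 */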
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token *token;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *) bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	if (req->rq_bulk_read)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * For sends we only compute a wrong checksum instead of
		 * corrupting the data, so the data is still correct on a
		 * resend.
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}

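/*
 * Client-side bulk verification: for writes just honour the server's
 * error flag; for reads trim the iov to the bytes actually transferred,
 * then verify the server-computed checksum.
 */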
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;
	int i, nob;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* fix the actual data size */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
			desc->bd_iov[i].kiov_len =
				desc->bd_nob_transferred - nob;
		}
		nob += desc->bd_iov[i].kiov_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}

/****************************************
 * sec apis				*
 ****************************************/

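/*
 * Install the shared client context into @plsec. Concurrent installers
 * race under pls_lock: a loser frees its freshly allocated context and
 * takes a reference on the winner's instead.
 */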
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx *ctx, *ctx_new;

	OBD_ALLOC_PTR(ctx_new);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		if (ctx_new)
			OBD_FREE_PTR(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1); /* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount); /* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec *plsec = sec2plsec(sec);

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(plsec->pls_ctx == NULL);

	class_import_put(sec->ps_import);

	OBD_FREE_PTR(plsec);
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}

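/*
 * Create a plain security instance for @imp; for a reverse security
 * (non-NULL @svc_ctx) the client context is installed immediately.
 */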
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec *plsec;
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	OBD_ALLOC_PTR(plsec);
	if (plsec == NULL)
		return NULL;

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (ctx == NULL) {
			plain_destroy_sec(sec);
			return NULL;
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	return sec;
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	if (unlikely(ctx == NULL))
		ctx = plain_sec_install_ctx(plsec);

	return ctx;
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	OBD_FREE_PTR(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	/* do nothing unless the caller wants to flush for 'all' */
	if (uid != -1)
		return 0;

	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	return 0;
}

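/*
 * Allocate, or reuse from the pool, a request buffer laid out as the four
 * fixed plain segments: header, embedded message, optional user
 * descriptor and optional bulk descriptor.
 */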
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		alloc_len = size_roundup_power2(alloc_len);
		OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_len;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

	return 0;
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	if (!req->rq_pool) {
		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = alloc_len;
	return 0;
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
}

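/*
 * Grow one segment of the embedded request message to @newsize,
 * reallocating the wrapper buffer if it can no longer hold the enlarged
 * message.
 */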
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int oldsize;
	int newmsg_size, newbuf_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* a request from the pool should always have a big enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		OBD_ALLOC_LARGE(newbuf, newbuf_size);
		if (newbuf == NULL)
			return -ENOMEM;

		/* Must lock this, so that an otherwise unprotected change
		 * of rq_reqmsg does not race with parallel threads
		 * traversing imp_replay_list. See LU-3333.
		 * This is a band-aid at best; we really need to deal with
		 * this in the request enlarging code before unpacking,
		 * which is already there. */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	return 0;
}

/****************************************
 * service apis				*
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount	= ATOMIC_INIT(1),
	.sc_policy	= &plain_policy,
};

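/*
 * Server-side acceptance of an incoming plain request: validate the
 * flavor and segment layout, then unpack the optional user and bulk
 * descriptors. Returns an SECSVC_* verdict.
 */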
static
int plain_accept(struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	int swabbed;

	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
		SPTLRPC_POLICY_PLAIN);

	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
	    SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
		CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return SECSVC_DROP;
	}

	if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
		return SECSVC_DROP;
	}

	swabbed = ptlrpc_req_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return SECSVC_DROP;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return SECSVC_DROP;
	}

	if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
		CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
		return SECSVC_DROP;
	}

	req->rq_sp_from = phdr->ph_sp;
	req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

	if (phdr->ph_flags & PLAIN_FL_USER) {
		if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
					     swabbed)) {
			CERROR("Malformed user descriptor\n");
			return SECSVC_DROP;
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
	}

	if (phdr->ph_flags & PLAIN_FL_BULK) {
		if (plain_unpack_bsd(msg, swabbed))
			return SECSVC_DROP;

		req->rq_pack_bulk = 1;
	}

	req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

	req->rq_svc_ctx = &plain_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

	return SECSVC_OK;
}

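/*
 * Allocate the reply state, either reusing the pre-allocated one attached
 * to the request or allocating the reply state and reply buffer together
 * in a single chunk.
 */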
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rs_size = sizeof(*rs);

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		OBD_ALLOC_LARGE(rs, rs_size);
		if (rs == NULL)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	return 0;
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		OBD_FREE_LARGE(rs, rs->rs_size);
}

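/*
 * Finalize the server reply: shrink it to its final size, fill in the
 * plain header, and checksum the message body when this is not the final
 * packed reply (the early reply case, verified in plain_ctx_verify()).
 */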
static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2 *msg = rs->rs_repbuf;
	struct plain_header *phdr;
	int len;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
			lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
			lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
			NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
		req->rq_reply_off = 0;
	}

	return 0;
}

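/*
 * Server side of a bulk write: verify the client's checksum against the
 * received data, setting BSD_FL_ERR in the reply descriptor on failure.
 */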
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenr;
	int rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}

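/*
 * Server side of a bulk read: compute the checksum of the outgoing data
 * into the reply descriptor for the client to verify.
 */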
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;

	LASSERT(req->rq_bulk_read);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      tokenv);
	if (rc) {
		CERROR("bulk read: server failed to compute checksum: %d\n",
		       rc);
	} else {
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
			corrupt_bulk_data(desc);
	}

	return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
	.refresh		= plain_ctx_refresh,
	.validate		= plain_ctx_validate,
	.sign			= plain_ctx_sign,
	.verify			= plain_ctx_verify,
	.wrap_bulk		= plain_cli_wrap_bulk,
	.unwrap_bulk		= plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
	.create_sec		= plain_create_sec,
	.destroy_sec		= plain_destroy_sec,
	.kill_sec		= plain_kill_sec,
	.lookup_ctx		= plain_lookup_ctx,
	.release_ctx		= plain_release_ctx,
	.flush_ctx_cache	= plain_flush_ctx_cache,
	.alloc_reqbuf		= plain_alloc_reqbuf,
	.free_reqbuf		= plain_free_reqbuf,
	.alloc_repbuf		= plain_alloc_repbuf,
	.free_repbuf		= plain_free_repbuf,
	.enlarge_reqbuf		= plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
	.accept			= plain_accept,
	.alloc_rs		= plain_alloc_rs,
	.authorize		= plain_authorize,
	.free_rs		= plain_free_rs,
	.unwrap_bulk		= plain_svc_unwrap_bulk,
	.wrap_bulk		= plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "plain",
	.sp_policy		= SPTLRPC_POLICY_PLAIN,
	.sp_cops		= &plain_sec_cops,
	.sp_sops		= &plain_sec_sops,
};

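/*
 * plain_at_offset is the offset of the final reply within the reply
 * buffer, leaving room for a maximally sized early reply in front of it.
 */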
int sptlrpc_plain_init(void)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rc;

	buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
	plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rc = sptlrpc_register_policy(&plain_policy);
	if (rc)
		CERROR("failed to register: %d\n", rc);

	return rc;
}

void sptlrpc_plain_fini(void)
{
	int rc;

	rc = sptlrpc_unregister_policy(&plain_policy);
	if (rc)
		CERROR("cannot unregister: %d\n", rc);
}