1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36
37 #ifndef _LUSTRE_SEC_H_
38 #define _LUSTRE_SEC_H_
39
40 /** \defgroup sptlrpc sptlrpc
41 *
42 * @{
43 */
44
45 /*
46 * types declared here only to avoid extra #includes
47 */
48 struct obd_import;
49 struct obd_export;
50 struct ptlrpc_request;
51 struct ptlrpc_reply_state;
52 struct ptlrpc_bulk_desc;
53 struct brw_page;
54 /* Linux specific */
55 struct key;
56 struct seq_file;
57
58 /*
59 * forward declaration
60 */
61 struct ptlrpc_sec_policy;
62 struct ptlrpc_sec_cops;
63 struct ptlrpc_sec_sops;
64 struct ptlrpc_sec;
65 struct ptlrpc_svc_ctx;
66 struct ptlrpc_cli_ctx;
67 struct ptlrpc_ctx_ops;
68
69 /**
70 * \addtogroup flavor flavor
71 *
72 * RPC flavor is represented by a 32-bit integer. Currently the high 12 bits
73 * are unused and must be set to 0 to allow for future expansion.
74 * <pre>
75 * ------------------------------------------------------------------------
76 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
77 * ------------------------------------------------------------------------
78 * </pre>
79 *
80 * @{
81 */
82
83 /*
84 * flavor constants
85 */
86 enum sptlrpc_policy {
87 SPTLRPC_POLICY_NULL = 0,
88 SPTLRPC_POLICY_PLAIN = 1,
89 SPTLRPC_POLICY_GSS = 2,
90 SPTLRPC_POLICY_MAX,
91 };
92
93 enum sptlrpc_mech_null {
94 SPTLRPC_MECH_NULL = 0,
95 SPTLRPC_MECH_NULL_MAX,
96 };
97
98 enum sptlrpc_mech_plain {
99 SPTLRPC_MECH_PLAIN = 0,
100 SPTLRPC_MECH_PLAIN_MAX,
101 };
102
103 enum sptlrpc_mech_gss {
104 SPTLRPC_MECH_GSS_NULL = 0,
105 SPTLRPC_MECH_GSS_KRB5 = 1,
106 SPTLRPC_MECH_GSS_MAX,
107 };
108
109 enum sptlrpc_service_type {
110 SPTLRPC_SVC_NULL = 0, /**< no security */
111 SPTLRPC_SVC_AUTH = 1, /**< authentication only */
112 SPTLRPC_SVC_INTG = 2, /**< integrity */
113 SPTLRPC_SVC_PRIV = 3, /**< privacy */
114 SPTLRPC_SVC_MAX,
115 };
116
117 enum sptlrpc_bulk_type {
118 SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
119 SPTLRPC_BULK_HASH = 1, /**< hash integrity */
120 SPTLRPC_BULK_MAX,
121 };
122
123 enum sptlrpc_bulk_service {
124 SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
125 SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
126 SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
127 SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
128 SPTLRPC_BULK_SVC_MAX,
129 };
130
131 /*
132 * compose/extract macros
133 */
134 #define FLVR_POLICY_OFFSET (0)
135 #define FLVR_MECH_OFFSET (4)
136 #define FLVR_SVC_OFFSET (8)
137 #define FLVR_BULK_TYPE_OFFSET (12)
138 #define FLVR_BULK_SVC_OFFSET (16)
139
140 #define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
141 (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
142 ((__u32)(mech) << FLVR_MECH_OFFSET) | \
143 ((__u32)(svc) << FLVR_SVC_OFFSET) | \
144 ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
145 ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
146
147 /*
148 * extraction
149 */
150 #define SPTLRPC_FLVR_POLICY(flavor) \
151 ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
152 #define SPTLRPC_FLVR_MECH(flavor) \
153 ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
154 #define SPTLRPC_FLVR_SVC(flavor) \
155 ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
156 #define SPTLRPC_FLVR_BULK_TYPE(flavor) \
157 ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
158 #define SPTLRPC_FLVR_BULK_SVC(flavor) \
159 ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
160
161 #define SPTLRPC_FLVR_BASE(flavor) \
162 ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
163 #define SPTLRPC_FLVR_BASE_SUB(flavor) \
164 ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
165
166 /*
167 * gss subflavors
168 */
169 #define MAKE_BASE_SUBFLVR(mech, svc) \
170 ((__u32)(mech) | \
171 ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
172
173 #define SPTLRPC_SUBFLVR_KRB5N \
174 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
175 #define SPTLRPC_SUBFLVR_KRB5A \
176 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
177 #define SPTLRPC_SUBFLVR_KRB5I \
178 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
179 #define SPTLRPC_SUBFLVR_KRB5P \
180 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
181
182 /*
183 * "end user" flavors
184 */
185 #define SPTLRPC_FLVR_NULL \
186 MAKE_FLVR(SPTLRPC_POLICY_NULL, \
187 SPTLRPC_MECH_NULL, \
188 SPTLRPC_SVC_NULL, \
189 SPTLRPC_BULK_DEFAULT, \
190 SPTLRPC_BULK_SVC_NULL)
191 #define SPTLRPC_FLVR_PLAIN \
192 MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
193 SPTLRPC_MECH_PLAIN, \
194 SPTLRPC_SVC_NULL, \
195 SPTLRPC_BULK_HASH, \
196 SPTLRPC_BULK_SVC_INTG)
197 #define SPTLRPC_FLVR_KRB5N \
198 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
199 SPTLRPC_MECH_GSS_KRB5, \
200 SPTLRPC_SVC_NULL, \
201 SPTLRPC_BULK_DEFAULT, \
202 SPTLRPC_BULK_SVC_NULL)
203 #define SPTLRPC_FLVR_KRB5A \
204 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
205 SPTLRPC_MECH_GSS_KRB5, \
206 SPTLRPC_SVC_AUTH, \
207 SPTLRPC_BULK_DEFAULT, \
208 SPTLRPC_BULK_SVC_NULL)
209 #define SPTLRPC_FLVR_KRB5I \
210 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
211 SPTLRPC_MECH_GSS_KRB5, \
212 SPTLRPC_SVC_INTG, \
213 SPTLRPC_BULK_DEFAULT, \
214 SPTLRPC_BULK_SVC_INTG)
215 #define SPTLRPC_FLVR_KRB5P \
216 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
217 SPTLRPC_MECH_GSS_KRB5, \
218 SPTLRPC_SVC_PRIV, \
219 SPTLRPC_BULK_DEFAULT, \
220 SPTLRPC_BULK_SVC_PRIV)
221
222 #define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
223
224 #define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
225 #define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
226
227 /**
228 * extract the useful part from wire flavor
229 */
230 #define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
231
232 /** @} flavor */
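/*
 * Worked example (illustrative only, not part of the API): SPTLRPC_FLVR_KRB5I
 * expands to MAKE_FLVR(GSS, KRB5, INTG, DEFAULT, INTG), i.e.
 * (2 << 0) | (1 << 4) | (2 << 8) | (0 << 12) | (2 << 16) == 0x00020212.
 * SPTLRPC_FLVR_POLICY() then yields 2 (GSS), SPTLRPC_FLVR_MECH() yields 1
 * (krb5), SPTLRPC_FLVR_SVC() yields 2 (integrity), and
 * SPTLRPC_FLVR_BASE_SUB() yields 0x21 == SPTLRPC_SUBFLVR_KRB5I.
 */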
233
234 static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
235 {
236 LASSERT(svc < SPTLRPC_SVC_MAX);
237 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
238 SPTLRPC_FLVR_MECH(*flvr),
239 svc,
240 SPTLRPC_FLVR_BULK_TYPE(*flvr),
241 SPTLRPC_FLVR_BULK_SVC(*flvr));
242 }
243
244 static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
245 {
246 LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
247 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
248 SPTLRPC_FLVR_MECH(*flvr),
249 SPTLRPC_FLVR_SVC(*flvr),
250 SPTLRPC_FLVR_BULK_TYPE(*flvr),
251 svc);
252 }
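/*
 * Illustrative sketch only (the helper name below is invented for this
 * example): deriving a variant of an existing wire flavor whose bulk I/O
 * is privacy protected, using flvr_set_bulk_svc() above.
 */
static inline __u32 flvr_with_bulk_priv_example(__u32 flvr)
{
	/* force the bulk service field to privacy, leaving the rest alone */
	flvr_set_bulk_svc(&flvr, SPTLRPC_BULK_SVC_PRIV);
	return flvr;
}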
253
254 struct bulk_spec_hash {
255 __u8 hash_alg;
256 };
257
258 /**
259 * Full description of the flavors being used on a ptlrpc connection,
260 * including both the regular RPC and the bulk transfer parts.
261 */
262 struct sptlrpc_flavor {
263 /**
264 * wire flavor, should be renamed to sf_wire.
265 */
266 __u32 sf_rpc;
267 /**
268 * general flags of PTLRPC_SEC_FL_*
269 */
270 __u32 sf_flags;
271 /**
272 * rpc flavor specification
273 */
274 union {
275 /* nothing for now */
276 } u_rpc;
277 /**
278 * bulk flavor specification
279 */
280 union {
281 struct bulk_spec_hash hash;
282 } u_bulk;
283 };
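/*
 * Worked example (illustrative): for the predefined "plain" configuration,
 * sf_rpc would be SPTLRPC_FLVR_PLAIN, sf_flags would normally be 0, and
 * u_bulk.hash.hash_alg would hold one of the BULK_HASH_ALG_* values
 * defined further below.
 */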
284
285 /**
286 * Identifies which part of Lustre generated the RPC. It's encoded into
287 * RPC requests and checked by the ptlrpc service.
288 */
289 enum lustre_sec_part {
290 LUSTRE_SP_CLI = 0,
291 LUSTRE_SP_MDT,
292 LUSTRE_SP_OST,
293 LUSTRE_SP_MGC,
294 LUSTRE_SP_MGS,
295 LUSTRE_SP_ANY = 0xFF
296 };
297
298 const char *sptlrpc_part2name(enum lustre_sec_part sp);
299 enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
300
301 /**
302 * A rule specifies a flavor to be used by a ptlrpc connection between
303 * two Lustre parts.
304 */
305 struct sptlrpc_rule {
306 __u32 sr_netid; /* LNET network ID */
307 __u8 sr_from; /* sec_part */
308 __u8 sr_to; /* sec_part */
309 __u16 sr_padding;
310 struct sptlrpc_flavor sr_flvr;
311 };
312
313 /**
314 * A set of rules in memory.
315 *
316 * Rules are generated and stored on MGS, and propagated to MDT, OST,
317 * and client when needed.
318 */
319 struct sptlrpc_rule_set {
320 int srs_nslot;
321 int srs_nrule;
322 struct sptlrpc_rule *srs_rules;
323 };
324
325 int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
326 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
327
328 static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
329 {
330 memset(set, 0, sizeof(*set));
331 }
332
333 void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
334 int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
335 int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
336 struct sptlrpc_rule *rule);
337 int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
338 enum lustre_sec_part from,
339 enum lustre_sec_part to,
340 lnet_nid_t nid,
341 struct sptlrpc_flavor *sf);
342 void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
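/*
 * Illustrative sketch only (function name invented for this example): how a
 * target could pick the flavor to enforce for a client connecting from a
 * given NID, starting from the null flavor and letting the rule set
 * override it.  Real callers should use sptlrpc_target_choose_flavor().
 */
static inline void sptlrpc_rule_set_choose_example(struct sptlrpc_rule_set *rset,
						   lnet_nid_t peer_nid,
						   struct sptlrpc_flavor *sf)
{
	memset(sf, 0, sizeof(*sf));
	sf->sf_rpc = SPTLRPC_FLVR_NULL;

	sptlrpc_rule_set_choose(rset, LUSTRE_SP_CLI, LUSTRE_SP_ANY,
				peer_nid, sf);
}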
343
344 int sptlrpc_process_config(struct lustre_cfg *lcfg);
345 void sptlrpc_conf_log_start(const char *logname);
346 void sptlrpc_conf_log_stop(const char *logname);
347 void sptlrpc_conf_log_update_begin(const char *logname);
348 void sptlrpc_conf_log_update_end(const char *logname);
349 void sptlrpc_conf_client_adapt(struct obd_device *obd);
350 void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
351 enum lustre_sec_part from,
352 lnet_nid_t nid,
353 struct sptlrpc_flavor *flavor);
354
355 /* The maximum length of the security payload. 1024 is enough for Kerberos 5,
356 * and is expected, though not guaranteed, to be enough for future mechanisms.
357 * Only used by the pre-allocated request/reply pool.
358 */
359 #define SPTLRPC_MAX_PAYLOAD (1024)
360
361
362 struct vfs_cred {
363 uint32_t vc_uid;
364 uint32_t vc_gid;
365 };
366
367 struct ptlrpc_ctx_ops {
368 /**
369 * To determine whether it's suitable to use the \a ctx for \a vcred.
370 */
371 int (*match) (struct ptlrpc_cli_ctx *ctx,
372 struct vfs_cred *vcred);
373
374 /**
375 * To bring the \a ctx up to date.
376 */
377 int (*refresh) (struct ptlrpc_cli_ctx *ctx);
378
379 /**
380 * Validate the \a ctx.
381 */
382 int (*validate) (struct ptlrpc_cli_ctx *ctx);
383
384 /**
385 * Force the \a ctx to die.
386 */
387 void (*force_die) (struct ptlrpc_cli_ctx *ctx,
388 int grace);
389 int (*display) (struct ptlrpc_cli_ctx *ctx,
390 char *buf, int bufsize);
391
392 /**
393 * Sign the request message using \a ctx.
394 *
395 * \pre req->rq_reqmsg points to the request message.
396 * \pre req->rq_reqlen is the request message length.
397 * \post req->rq_reqbuf points to the request message with signature.
398 * \post req->rq_reqdata_len is set to the final request message size.
399 *
400 * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
401 */
402 int (*sign) (struct ptlrpc_cli_ctx *ctx,
403 struct ptlrpc_request *req);
404
405 /**
406 * Verify the reply message using \a ctx.
407 *
408 * \pre req->rq_repdata points to the reply message with signature.
409 * \pre req->rq_repdata_len is the total reply message length.
410 * \post req->rq_repmsg points to the reply message without signature.
411 * \post req->rq_replen is the reply message length.
412 *
413 * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
414 */
415 int (*verify) (struct ptlrpc_cli_ctx *ctx,
416 struct ptlrpc_request *req);
417
418 /**
419 * Encrypt the request message using \a ctx.
420 *
421 * \pre req->rq_reqmsg points to the request message in clear text.
422 * \pre req->rq_reqlen is the request message length.
423 * \post req->rq_reqbuf points to the request message.
424 * \post req->rq_reqdata_len is set to the final request message size.
425 *
426 * \see gss_cli_ctx_seal().
427 */
428 int (*seal) (struct ptlrpc_cli_ctx *ctx,
429 struct ptlrpc_request *req);
430
431 /**
432 * Decrypt the reply message using \a ctx.
433 *
434 * \pre req->rq_repdata points to the encrypted reply message.
435 * \pre req->rq_repdata_len is the total cipher text length.
436 * \post req->rq_repmsg points to the reply message in clear text.
437 * \post req->rq_replen is the reply message length in clear text.
438 *
439 * \see gss_cli_ctx_unseal().
440 */
441 int (*unseal) (struct ptlrpc_cli_ctx *ctx,
442 struct ptlrpc_request *req);
443
444 /**
445 * Wrap bulk request data. This is called before wrapping RPC
446 * request message.
447 *
448 * \pre the bulk buffer is described by desc->bd_iov and
449 * desc->bd_iov_count. Note that for a read it's just a buffer and no
450 * data needs to be sent; for a write it contains data in clear text.
451 * \post when necessary, a ptlrpc_bulk_sec_desc has been properly prepared
452 * (usually inside the RPC request message).
453 * - encryption: the cipher-text bulk buffer is described by
454 * desc->bd_enc_iov and desc->bd_iov_count (currently the iov
455 * count is assumed to remain the same).
456 * - otherwise: the bulk buffer is still desc->bd_iov and
457 * desc->bd_iov_count.
458 *
459 * \return 0: success.
460 * \return -ev: error code.
461 *
462 * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
463 */
464 int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
465 struct ptlrpc_request *req,
466 struct ptlrpc_bulk_desc *desc);
467
468 /**
469 * Unwrap bulk reply data. This is called after unwrapping the RPC
470 * reply message.
471 *
472 * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
473 * desc->bd_iov_count, according to wrap_bulk().
474 * \post final bulk data in clear text is placed in buffer described
475 * by desc->bd_iov and desc->bd_iov_count.
476 * \return +ve nob of actual bulk data in clear text.
477 * \return -ve error code.
478 *
479 * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
480 */
481 int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
482 struct ptlrpc_request *req,
483 struct ptlrpc_bulk_desc *desc);
484 };
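/*
 * Illustrative sketch only (all names below are invented for the example,
 * and the block is disabled with "#if 0"): the shape of a minimal client
 * context ops vector.  Real implementations live in sec_null.c,
 * sec_plain.c and the GSS code.
 */
#if 0
static int example_ctx_match(struct ptlrpc_cli_ctx *ctx,
			     struct vfs_cred *vcred)
{
	return 1;	/* a single shared context matches every vcred */
}

static int example_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	return 0;	/* nothing to refresh */
}

static struct ptlrpc_ctx_ops example_ctx_ops = {
	.match	 = example_ctx_match,
	.refresh = example_ctx_refresh,
};
#endif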
485
486 #define PTLRPC_CTX_NEW_BIT (0) /* newly created */
487 #define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
488 #define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
489 #define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
490 #define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
491 #define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
492
493 #define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT)
494 #define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT)
495 #define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT)
496 #define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT)
497 #define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT)
498 #define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT)
499
500 #define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
501 PTLRPC_CTX_UPTODATE | \
502 PTLRPC_CTX_DEAD | \
503 PTLRPC_CTX_ERROR)
504
505 struct ptlrpc_cli_ctx {
506 struct hlist_node cc_cache; /* linked into ctx cache */
507 atomic_t cc_refcount;
508 struct ptlrpc_sec *cc_sec;
509 struct ptlrpc_ctx_ops *cc_ops;
510 unsigned long cc_expire; /* in seconds */
511 unsigned int cc_early_expire:1;
512 unsigned long cc_flags;
513 struct vfs_cred cc_vcred;
514 spinlock_t cc_lock;
515 struct list_head cc_req_list; /* waiting reqs linked here */
516 struct list_head cc_gc_chain; /* linked to gc chain */
517 };
518
519 /**
520 * client side policy operation vector.
521 */
522 struct ptlrpc_sec_cops {
523 /**
524 * Given an \a imp, create and initialize a ptlrpc_sec structure.
525 * \param ctx service context:
526 * - regular import: \a ctx should be NULL;
527 * - reverse import: \a ctx is obtained from incoming request.
528 * \param flavor specify what flavor to use.
529 *
530 * When necessary, the policy module is responsible for taking a
531 * reference on the import.
532 *
533 * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
534 */
535 struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
536 struct ptlrpc_svc_ctx *ctx,
537 struct sptlrpc_flavor *flavor);
538
539 /**
540 * Destructor of ptlrpc_sec. When called, the refcount has been dropped
541 * to 0 and all contexts have been destroyed.
542 *
543 * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
544 */
545 void (*destroy_sec) (struct ptlrpc_sec *sec);
546
547 /**
548 * Notify that this ptlrpc_sec is going to die. Optionally, the policy
549 * module is supposed to set sec->ps_dying and take whatever actions
550 * are necessary.
551 *
552 * \see plain_kill_sec(), gss_sec_kill().
553 */
554 void (*kill_sec) (struct ptlrpc_sec *sec);
555
556 /**
557 * Given \a vcred, look up and/or create its context. The policy module
558 * is supposed to maintain its own context cache.
559 * XXX currently \a create and \a remove_dead are always 1, so perhaps
560 * they should be removed completely.
561 *
562 * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
563 */
564 struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
565 struct vfs_cred *vcred,
566 int create,
567 int remove_dead);
568
569 /**
570 * Called when the reference count of \a ctx drops to 0. The policy
571 * module is supposed to destroy this context, or do whatever else its
572 * cache maintenance mechanism requires.
573 *
574 * \param sync if zero, we shouldn't wait for the context to be
575 * destroyed completely.
576 *
577 * \see plain_release_ctx(), gss_sec_release_ctx_kr().
578 */
579 void (*release_ctx) (struct ptlrpc_sec *sec,
580 struct ptlrpc_cli_ctx *ctx,
581 int sync);
582
583 /**
584 * Flush the context cache.
585 *
586 * \param uid user whose contexts should be flushed; -1 means all contexts.
587 * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
588 * contexts should be cleared immediately.
589 * \param force if zero, only idle contexts will be flushed.
590 *
591 * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
592 */
593 int (*flush_ctx_cache)
594 (struct ptlrpc_sec *sec,
595 uid_t uid,
596 int grace,
597 int force);
598
599 /**
600 * Called periodically by garbage collector to remove dead contexts
601 * from cache.
602 *
603 * \see gss_sec_gc_ctx_kr().
604 */
605 void (*gc_ctx) (struct ptlrpc_sec *sec);
606
607 /**
608 * Given a context \a ctx, install a corresponding reverse service
609 * context on client side.
610 * XXX currently it's only used by GSS module, maybe we should remove
611 * this from general API.
612 */
613 int (*install_rctx)(struct obd_import *imp,
614 struct ptlrpc_sec *sec,
615 struct ptlrpc_cli_ctx *ctx);
616
617 /**
618 * To allocate request buffer for \a req.
619 *
620 * \pre req->rq_reqmsg == NULL.
621 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated and
622 * we are not supposed to free it.
623 * \post on success, req->rq_reqmsg points to a buffer of size
624 * at least \a lustre_msg_size.
625 *
626 * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
627 */
628 int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
629 struct ptlrpc_request *req,
630 int lustre_msg_size);
631
632 /**
633 * To free request buffer for \a req.
634 *
635 * \pre req->rq_reqbuf != NULL.
636 *
637 * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
638 */
639 void (*free_reqbuf) (struct ptlrpc_sec *sec,
640 struct ptlrpc_request *req);
641
642 /**
643 * To allocate reply buffer for \a req.
644 *
645 * \pre req->rq_repbuf == NULL.
646 * \post on success, req->rq_repbuf points to a buffer of size
647 * req->rq_repbuf_len, which should be large enough to receive a
648 * reply transformed from \a lustre_msg_size bytes of clear text.
649 *
650 * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
651 */
652 int (*alloc_repbuf)(struct ptlrpc_sec *sec,
653 struct ptlrpc_request *req,
654 int lustre_msg_size);
655
656 /**
657 * To free reply buffer for \a req.
658 *
659 * \pre req->rq_repbuf != NULL.
660 * \post req->rq_repbuf == NULL.
661 * \post req->rq_repbuf_len == 0.
662 *
663 * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
664 */
665 void (*free_repbuf) (struct ptlrpc_sec *sec,
666 struct ptlrpc_request *req);
667
668 /**
669 * To expand the request buffer of \a req, so that the \a segment in
670 * the request message pointed to by req->rq_reqmsg can accommodate
671 * at least \a newsize bytes of data.
672 *
673 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
674 *
675 * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
676 * gss_enlarge_reqbuf().
677 */
678 int (*enlarge_reqbuf)
679 (struct ptlrpc_sec *sec,
680 struct ptlrpc_request *req,
681 int segment, int newsize);
682 /*
683 * misc
684 */
685 int (*display) (struct ptlrpc_sec *sec,
686 struct seq_file *seq);
687 };
688
689 /**
690 * server side policy operation vector.
691 */
692 struct ptlrpc_sec_sops {
693 /**
694 * verify an incoming request.
695 *
696 * \pre the request message is pointed to by req->rq_reqbuf, its size
697 * is req->rq_reqdata_len; and the message has been unpacked to
698 * host byte order.
699 *
700 * \retval SECSVC_OK success, req->rq_reqmsg points to the request message
701 * in clear text, its size is req->rq_reqlen; req->rq_svc_ctx is set;
702 * req->rq_sp_from is decoded from the request.
703 * \retval SECSVC_COMPLETE success, the request has been fully
704 * processed, and reply message has been prepared; req->rq_sp_from is
705 * decoded from request.
706 * \retval SECSVC_DROP failed, this request should be dropped.
707 *
708 * \see null_accept(), plain_accept(), gss_svc_accept_kr().
709 */
710 int (*accept) (struct ptlrpc_request *req);
711
712 /**
713 * Perform security transformation upon reply message.
714 *
715 * \pre the reply message is pointed to by req->rq_reply_state->rs_msg,
716 * its size is req->rq_replen.
717 * \post req->rs_repdata_len is the final message size.
718 * \post req->rq_reply_off is set.
719 *
720 * \see null_authorize(), plain_authorize(), gss_svc_authorize().
721 */
722 int (*authorize) (struct ptlrpc_request *req);
723
724 /**
725 * Invalidate server context \a ctx.
726 *
727 * \see gss_svc_invalidate_ctx().
728 */
729 void (*invalidate_ctx)
730 (struct ptlrpc_svc_ctx *ctx);
731
732 /**
733 * Allocate a ptlrpc_reply_state.
734 *
735 * \param msgsize size of the reply message in clear text.
736 * \pre if req->rq_reply_state != NULL, then it's pre-allocated and we
737 * should simply use it; otherwise we're responsible for allocating
738 * a new one.
739 * \post req->rq_reply_state != NULL;
740 * \post req->rq_reply_state->rs_msg != NULL;
741 *
742 * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
743 */
744 int (*alloc_rs) (struct ptlrpc_request *req,
745 int msgsize);
746
747 /**
748 * Free a ptlrpc_reply_state.
749 */
750 void (*free_rs) (struct ptlrpc_reply_state *rs);
751
752 /**
753 * Release the server context \a ctx.
754 *
755 * \see gss_svc_free_ctx().
756 */
757 void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
758
759 /**
760 * Install a reverse context based on the server context \a ctx.
761 *
762 * \see gss_svc_install_rctx_kr().
763 */
764 int (*install_rctx)(struct obd_import *imp,
765 struct ptlrpc_svc_ctx *ctx);
766
767 /**
768 * Prepare buffer for incoming bulk write.
769 *
770 * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
771 * intended to receive the write.
772 *
773 * \see gss_svc_prep_bulk().
774 */
775 int (*prep_bulk) (struct ptlrpc_request *req,
776 struct ptlrpc_bulk_desc *desc);
777
778 /**
779 * Unwrap the bulk write data.
780 *
781 * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
782 */
783 int (*unwrap_bulk) (struct ptlrpc_request *req,
784 struct ptlrpc_bulk_desc *desc);
785
786 /**
787 * Wrap the bulk read data.
788 *
789 * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
790 */
791 int (*wrap_bulk) (struct ptlrpc_request *req,
792 struct ptlrpc_bulk_desc *desc);
793 };
794
795 struct ptlrpc_sec_policy {
796 struct module *sp_owner;
797 char *sp_name;
798 __u16 sp_policy; /* policy number */
799 struct ptlrpc_sec_cops *sp_cops; /* client ops */
800 struct ptlrpc_sec_sops *sp_sops; /* server ops */
801 };
802
803 #define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
804 #define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
805 #define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
806 #define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
807 #define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
808
809 /**
810 * The ptlrpc_sec represents the client side ptlrpc security facilities,
811 * each obd_import (both regular and reverse) must be associated with
812 * a ptlrpc_sec.
813 *
814 * \see sptlrpc_import_sec_adapt().
815 */
816 struct ptlrpc_sec {
817 struct ptlrpc_sec_policy *ps_policy;
818 atomic_t ps_refcount;
819 /** statistic only */
820 atomic_t ps_nctx;
821 /** unique identifier */
822 int ps_id;
823 struct sptlrpc_flavor ps_flvr;
824 enum lustre_sec_part ps_part;
825 /** after set, no more new context will be created */
826 unsigned int ps_dying:1;
827 /** owning import */
828 struct obd_import *ps_import;
829 spinlock_t ps_lock;
830
831 /*
832 * garbage collection
833 */
834 struct list_head ps_gc_list;
835 unsigned long ps_gc_interval; /* in seconds */
836 unsigned long ps_gc_next; /* in seconds */
837 };
838
839 static inline int sec_is_reverse(struct ptlrpc_sec *sec)
840 {
841 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
842 }
843
844 static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
845 {
846 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
847 }
848
849
850 struct ptlrpc_svc_ctx {
851 atomic_t sc_refcount;
852 struct ptlrpc_sec_policy *sc_policy;
853 };
854
855 /*
856 * user identity descriptor
857 */
858 #define LUSTRE_MAX_GROUPS (128)
859
860 struct ptlrpc_user_desc {
861 __u32 pud_uid;
862 __u32 pud_gid;
863 __u32 pud_fsuid;
864 __u32 pud_fsgid;
865 __u32 pud_cap;
866 __u32 pud_ngroups;
867 __u32 pud_groups[0];
868 };
869
870 /*
871 * bulk flavors
872 */
873 enum sptlrpc_bulk_hash_alg {
874 BULK_HASH_ALG_NULL = 0,
875 BULK_HASH_ALG_ADLER32,
876 BULK_HASH_ALG_CRC32,
877 BULK_HASH_ALG_MD5,
878 BULK_HASH_ALG_SHA1,
879 BULK_HASH_ALG_SHA256,
880 BULK_HASH_ALG_SHA384,
881 BULK_HASH_ALG_SHA512,
882 BULK_HASH_ALG_MAX
883 };
884
885 const char *sptlrpc_get_hash_name(__u8 hash_alg);
886 __u8 sptlrpc_get_hash_alg(const char *algname);
887
888 enum {
889 BSD_FL_ERR = 1,
890 };
891
892 struct ptlrpc_bulk_sec_desc {
893 __u8 bsd_version; /* 0 */
894 __u8 bsd_type; /* SPTLRPC_BULK_XXX */
895 __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
896 __u8 bsd_flags; /* flags */
897 __u32 bsd_nob; /* nob of bulk data */
898 __u8 bsd_data[0]; /* policy-specific token */
899 };
900
901
902 /*
903 * Round size up to the next power of 2, for slab allocation.
904 * @size must be sane (must not overflow after rounding up).
905 */
906 static inline int size_roundup_power2(int size)
907 {
908 size--;
909 size |= size >> 1;
910 size |= size >> 2;
911 size |= size >> 4;
912 size |= size >> 8;
913 size |= size >> 16;
914 size++;
915 return size;
916 }
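/*
 * Worked examples (illustrative): size_roundup_power2(600) == 1024,
 * size_roundup_power2(1024) == 1024, size_roundup_power2(1025) == 2048.
 */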
917
918 /*
919 * internal support libraries
920 */
921 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
922 int segment, int newsize);
923
924 /*
925 * security policies
926 */
927 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
928 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
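/*
 * Illustrative sketch only (names invented, block disabled with "#if 0"):
 * how a policy module would describe itself and register with sptlrpc.
 * Compare the null and plain policies for real usage; example_cops and
 * example_sops are assumed to be defined by the policy.
 */
#if 0
static struct ptlrpc_sec_policy example_policy = {
	.sp_owner  = THIS_MODULE,
	.sp_name   = "example",
	.sp_policy = SPTLRPC_POLICY_PLAIN,	/* a number from enum sptlrpc_policy */
	.sp_cops   = &example_cops,
	.sp_sops   = &example_sops,
};

static int __init example_init(void)
{
	return sptlrpc_register_policy(&example_policy);
}
#endif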
929
930 __u32 sptlrpc_name2flavor_base(const char *name);
931 const char *sptlrpc_flavor2name_base(__u32 flvr);
932 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
933 char *buf, int bufsize);
934 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
935 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
936
937 static inline
938 struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
939 {
940 __module_get(policy->sp_owner);
941 return policy;
942 }
943
944 static inline
945 void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
946 {
947 module_put(policy->sp_owner);
948 }
949
950 /*
951 * client credential
952 */
953 static inline
954 unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
955 {
956 return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
957 }
958
959 static inline
960 int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
961 {
962 return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
963 }
964
965 static inline
966 int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
967 {
968 return (cli_ctx_status(ctx) != 0);
969 }
970
971 static inline
972 int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
973 {
974 return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
975 }
976
977 static inline
978 int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
979 {
980 return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
981 }
982
983 static inline
984 int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
985 {
986 return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
987 }
988
989 static inline
990 int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
991 {
992 return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
993 }
994
995 /*
996 * sec get/put
997 */
998 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
999 void sptlrpc_sec_put(struct ptlrpc_sec *sec);
1000
1001 /*
1002 * internal APIs which are only used by policy implementations
1003 */
1004 int sptlrpc_get_next_secid(void);
1005 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
1006
1007 /*
1008 * exported client context api
1009 */
1010 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
1011 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
1012 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
1013 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
1014 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
1015
1016 /*
1017 * exported client context wrap/buffers
1018 */
1019 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
1020 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
1021 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
1022 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
1023 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
1024 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
1025 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1026 int segment, int newsize);
1027 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1028 struct ptlrpc_request **req_ret);
1029 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
1030
1031 void sptlrpc_request_out_callback(struct ptlrpc_request *req);
1032
1033 /*
1034 * exported higher-level interface for imports & requests
1035 */
1036 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1037 struct ptlrpc_svc_ctx *ctx,
1038 struct sptlrpc_flavor *flvr);
1039 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
1040 void sptlrpc_import_sec_put(struct obd_import *imp);
1041
1042 int sptlrpc_import_check_ctx(struct obd_import *imp);
1043 void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
1044 void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
1045 void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
1046 int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
1047 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
1048 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
1049 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
1050 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
1051
1052 int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
1053
1054 /* gc */
1055 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
1056 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
1057 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
1058
1059 /* misc */
1060 const char *sec2target_str(struct ptlrpc_sec *sec);
1061 /*
1062 * lprocfs
1063 */
1064 #if defined (CONFIG_PROC_FS)
1065 struct proc_dir_entry;
1066 extern struct proc_dir_entry *sptlrpc_proc_root;
1067 int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
1068 #else
1069 #define sptlrpc_proc_root NULL
1070 static inline int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev)
1071 { return 0; }
1072 #endif
1073
1074 /*
1075 * server side
1076 */
1077 enum secsvc_accept_res {
1078 SECSVC_OK = 0,
1079 SECSVC_COMPLETE,
1080 SECSVC_DROP,
1081 };
1082
1083 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
1084 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
1085 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
1086 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
1087 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
1088 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
1089 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
1090
1091 int sptlrpc_target_export_check(struct obd_export *exp,
1092 struct ptlrpc_request *req);
1093 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1094 struct sptlrpc_rule_set *rset);
1095
1096 /*
1097 * reverse context
1098 */
1099 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1100 struct ptlrpc_svc_ctx *ctx);
1101 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1102 struct ptlrpc_cli_ctx *ctx);
1103
1104 /* bulk security api */
1105 int sptlrpc_enc_pool_add_user(void);
1106 int sptlrpc_enc_pool_del_user(void);
1107 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc);
1108 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
1109
1110 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1111 struct ptlrpc_bulk_desc *desc);
1112 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1113 struct ptlrpc_bulk_desc *desc,
1114 int nob);
1115 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1116 struct ptlrpc_bulk_desc *desc);
1117
1118 /* bulk helpers (internal use only by policies) */
1119 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
1120 void *buf, int buflen);
1121
1122 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
1123
1124 /* user descriptor helpers */
1125 static inline int sptlrpc_user_desc_size(int ngroups)
1126 {
1127 return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
1128 }
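/*
 * Worked example (illustrative, assuming no structure padding): with the
 * maximum of LUSTRE_MAX_GROUPS (128) supplementary groups this is
 * 6 * 4 + 128 * 4 == 536 bytes.
 */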
1129
1130 int sptlrpc_current_user_desc_size(void);
1131 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
1132 int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
1133
1134
1135 #define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
1136 #define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
1137
1138 enum {
1139 LUSTRE_SEC_NONE = 0,
1140 LUSTRE_SEC_REMOTE = 1,
1141 LUSTRE_SEC_SPECIFY = 2,
1142 LUSTRE_SEC_ALL = 3
1143 };
1144
1145 /** @} sptlrpc */
1146
1147 #endif /* _LUSTRE_SEC_H_ */
1148