/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

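/*
 * The v1 wrap path pads the plaintext out to a multiple of the cipher
 * blocksize with "one to blocksize" padding: every padding byte holds
 * the padding length, and at least one byte is always added, so the
 * unwrapper can recover the pad length from the final byte.  A worked
 * example (illustrative only, not from the original source):
 *
 *	blocksize = 8, length = 13  ->  padding = 3:  ... 03 03 03
 *	blocksize = 8, length = 16  ->  padding = 8:  ... 08 08 08 08 08 08 08 08
 */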
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

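	/*
	 * The padding bytes go at the end of the last non-empty segment
	 * of the xdr_buf: the tail if the buffer has page or tail data,
	 * otherwise the head.  The caller is assumed to have left room
	 * there for up to blocksize bytes.
	 */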
	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust the head length and the total
	 * length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc 1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case, since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to a random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

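	/*
	 * conflen matches the enctype: 8 bytes for the des/des3/arcfour
	 * enctypes and 16 for the aes enctypes, so the 16-byte case falls
	 * through and fills two u64s.  Any other length is a caller bug.
	 */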
	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of the wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

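/*
 * Layout of the v1 wrap token built below, relative to ptr (a sketch
 * for orientation; offsets per rfc 1964, section 1.2.1):
 *
 *	ptr[0..1]	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
 *	ptr[2..3]	SGN_ALG		stored little-endian on the wire
 *	ptr[4..5]	SEAL_ALG	stored little-endian on the wire
 *	ptr[6..7]	filler		0xff 0xff
 *	ptr[8..15]	SND_SEQ		encrypted sequence number
 *	ptr[16..]	SGN_CKSUM	gk5e->cksumlength checksum bytes,
 *				followed by the encrypted confounder,
 *				plaintext, and padding.
 */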
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now points at the header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX it would probably be more efficient to compute the checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

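/*
 * The inverse of gss_wrap_kerberos_v1(): parse and verify the token
 * header, recover the sequence number, decrypt, verify the checksum,
 * then strip the header, confounder, and padding so only the plaintext
 * payload remains in the xdr_buf.
 */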
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       %s\n", __func__);

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after the token header and checksum.  ptr points
	 * to the beginning of the token header.
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need the plaintext seqnum to derive the encryption key for
	 * arcfour-hmac.
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired. */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem, perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

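/*
 * Rotating left by `shift` undoes a sender-side right-rotation of the
 * token body (the RRC of rfc 4121, section 4.2.5).  A small example
 * (illustrative only): rotating "abcdef" left by 2 yields "cdefab".
 * rotate_buf_a_little() handles shifts up to LOCAL_BUF_LEN by saving
 * the first `shift` bytes, sliding the rest down, and writing the
 * saved bytes at the end; _rotate_left() repeats it for larger shifts.
 */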
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i + shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

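/*
 * Layout of the v2 wrap token header built below (a sketch for
 * orientation; 16 bytes, GSS_KRB5_TOK_HDR_LEN, per rfc 4121,
 * section 4.2.6.2):
 *
 *	ptr[0..1]	TOK_ID	0x05 0x04 (KG2_TOK_WRAP)
 *	ptr[2]		Flags	SentByAcceptor | Sealed | AcceptorSubkey
 *	ptr[3]		Filler	0xff
 *	ptr[4..5]	EC	"extra count", big-endian
 *	ptr[6..7]	RRC	"right rotation count", big-endian
 *	ptr[8..15]	SND_SEQ	sequence number, big-endian
 */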
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

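/*
 * The inverse of gss_wrap_kerberos_v2(): verify the token header and
 * flags, undo the sender's right-rotation, decrypt in place, compare
 * the decrypted copy of the header against the plaintext one, then
 * slide the payload back over the header and trim the EC bytes, the
 * encrypted header copy, and the checksum from the end.
 */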
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */

	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}

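/*
 * Dispatch on enctype: the des/des3/arcfour enctypes use the v1
 * (rfc 1964-style) token format, while the aes enctypes use the v2
 * (rfc 4121) format.
 */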
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}