2 * Copyright (c) 2003, PADL Software Pty Ltd.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of PADL Software nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "gsskrb5_locl.h"
36 * Implementation of RFC 4121
39 #define CFXSentByAcceptor (1 << 0)
40 #define CFXSealed (1 << 1)
41 #define CFXAcceptorSubkey (1 << 2)
44 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
49 size_t *output_length,
56 /* 16-byte header is always first */
57 *output_length = sizeof(gss_cfx_wrap_token_desc);
60 ret = krb5_crypto_get_checksum_type(context, crypto, &type);
64 ret = krb5_checksumsize(context, type, cksumsize);
71 /* Header is concatenated with data before encryption */
72 input_length += sizeof(gss_cfx_wrap_token_desc);
75 ret = krb5_crypto_getblocksize(context, crypto, &padsize);
77 ret = krb5_crypto_getpadsize(context, crypto, &padsize);
84 *padlength = padsize - (input_length % padsize);
86 /* We add the pad ourselves (noted here for completeness only) */
87 input_length += *padlength;
90 *output_length += krb5_get_wrapped_length(context,
91 crypto, input_length);
93 /* Checksum is concatenated with data */
94 *output_length += input_length + *cksumsize;
97 assert(*output_length > input_length);
103 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
104 const gsskrb5_ctx ctx,
105 krb5_context context,
108 OM_uint32 req_output_size,
109 OM_uint32 *max_input_size)
115 /* 16-byte header is always first */
116 if (req_output_size < 16)
118 req_output_size -= 16;
121 size_t wrapped_size, sz;
123 wrapped_size = req_output_size + 1;
126 sz = krb5_get_wrapped_length(context,
127 ctx->crypto, wrapped_size);
128 } while (wrapped_size && sz > req_output_size);
129 if (wrapped_size == 0)
133 if (wrapped_size < 16)
138 *max_input_size = wrapped_size;
143 ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
147 ret = krb5_checksumsize(context, type, &cksumsize);
151 if (req_output_size < cksumsize)
154 /* Checksum is concatenated with data */
155 *max_input_size = req_output_size - cksumsize;
162 * Rotate "rrc" bytes to the front or back
165 static krb5_error_code
166 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
168 u_char *tmp, buf[256];
181 if (rrc <= sizeof(buf)) {
190 memcpy(tmp, data, rrc);
191 memmove(data, (u_char *)data + rrc, left);
192 memcpy((u_char *)data + left, tmp, rrc);
194 memcpy(tmp, (u_char *)data + left, rrc);
195 memmove((u_char *)data + rrc, data, left);
196 memcpy(data, tmp, rrc);
199 if (rrc > sizeof(buf))
205 gss_iov_buffer_desc *
206 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
210 for (i = 0; i < iov_count; i++)
211 if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
217 _gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
219 if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
220 if (buffer->buffer.length == size)
221 return GSS_S_COMPLETE;
222 free(buffer->buffer.value);
225 buffer->buffer.value = malloc(size);
226 buffer->buffer.length = size;
227 if (buffer->buffer.value == NULL) {
228 *minor_status = ENOMEM;
229 return GSS_S_FAILURE;
231 buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
233 return GSS_S_COMPLETE;
238 _gk_verify_buffers(OM_uint32 *minor_status,
239 const gsskrb5_ctx ctx,
240 const gss_iov_buffer_desc *header,
241 const gss_iov_buffer_desc *padding,
242 const gss_iov_buffer_desc *trailer)
244 if (header == NULL) {
245 *minor_status = EINVAL;
246 return GSS_S_FAILURE;
249 if (IS_DCE_STYLE(ctx)) {
251 * In DCE style mode we reject having a padding or trailer buffer
254 *minor_status = EINVAL;
255 return GSS_S_FAILURE;
258 *minor_status = EINVAL;
259 return GSS_S_FAILURE;
263 * In non-DCE style mode we require having a padding buffer
265 if (padding == NULL) {
266 *minor_status = EINVAL;
267 return GSS_S_FAILURE;
272 return GSS_S_COMPLETE;
/*
 * IOV (scatter/gather) variant of RFC 4121 Wrap.  Builds the 16-byte
 * CFX Wrap token in the HEADER buffer, then either encrypts the payload
 * in place (confidentiality) or appends a checksum over payload|header.
 *
 * NOTE(review): this listing is an excerpt with embedded original line
 * numbers; many lines (error-branch bodies, closing braces, some
 * declarations) are elided between the numbered lines below.  Code is
 * left byte-identical; only comments were added.
 */
277 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
279 krb5_context context,
282 gss_iov_buffer_desc *iov,
285 OM_uint32 major_status, junk;
286 gss_iov_buffer_desc *header, *trailer, *padding;
287 size_t gsshsize, k5hsize;
288 size_t gsstsize, k5tsize;
289 size_t rrc = 0, ec = 0;
291 gss_cfx_wrap_token token;
295 krb5_crypto_iov *data = NULL;
/* Locate the caller-supplied HEADER / PADDING / TRAILER buffers. */
297 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
298 if (header == NULL) {
299 *minor_status = EINVAL;
300 return GSS_S_FAILURE;
/* CFX does its padding via the EC filler, so any padding buffer is emptied. */
303 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
304 if (padding != NULL) {
305 padding->buffer.length = 0;
308 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
310 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
311 if (major_status != GSS_S_COMPLETE) {
/* Sum the DATA buffer lengths to get the plaintext size. */
321 for (i = 0; i < iov_count; i++) {
322 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
323 case GSS_IOV_BUFFER_TYPE_DATA:
324 size += iov[i].buffer.length;
/*
 * Confidentiality path: the inner copy of the 16-byte token header is
 * encrypted with the data, so it counts toward the plaintext size.
 * Query the crypto layer for its header/trailer/padding overheads.
 */
331 size += sizeof(gss_cfx_wrap_token_desc);
333 *minor_status = krb5_crypto_length(context, ctx->crypto,
334 KRB5_CRYPTO_TYPE_HEADER,
337 return GSS_S_FAILURE;
339 *minor_status = krb5_crypto_length(context, ctx->crypto,
340 KRB5_CRYPTO_TYPE_TRAILER,
343 return GSS_S_FAILURE;
345 *minor_status = krb5_crypto_length(context, ctx->crypto,
346 KRB5_CRYPTO_TYPE_PADDING,
349 return GSS_S_FAILURE;
/* ec = EC filler bytes needed to round `size` up to the pad base. */
352 k5psize = k5pbase - (size % k5pbase);
/* DCE style additionally rounds to the cipher block size. */
357 if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
358 *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
361 return GSS_S_FAILURE;
/* GSS header = CFX token + krb5 crypto header; GSS trailer = token copy + EC + krb5 trailer. */
367 gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
368 gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
/* Integrity-only path (no sealing). */
370 if (IS_DCE_STYLE(ctx)) {
371 *minor_status = EINVAL;
372 return GSS_S_FAILURE;
376 *minor_status = krb5_crypto_length(context, ctx->crypto,
377 KRB5_CRYPTO_TYPE_CHECKSUM,
380 return GSS_S_FAILURE;
382 gsshsize = sizeof(gss_cfx_wrap_token_desc);
/*
 * Size (or allocate) the trailer buffer.  With no trailer buffer the
 * trailer bytes are rotated into the header (RRC), which is what DCE
 * style expects.
 */
390 if (trailer == NULL) {
392 if (IS_DCE_STYLE(ctx))
394 gsshsize += gsstsize;
396 } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
397 major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
400 } else if (trailer->buffer.length < gsstsize) {
401 *minor_status = KRB5_BAD_MSIZE;
402 major_status = GSS_S_FAILURE;
405 trailer->buffer.length = gsstsize;
/* Size (or allocate) the header buffer. */
411 if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
412 major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
413 if (major_status != GSS_S_COMPLETE)
415 } else if (header->buffer.length < gsshsize) {
416 *minor_status = KRB5_BAD_MSIZE;
417 major_status = GSS_S_FAILURE;
420 header->buffer.length = gsshsize;
/* Fill in the RFC 4121 section 4.2.2 token fields. */
422 token = (gss_cfx_wrap_token)header->buffer.value;
424 token->TOK_ID[0] = 0x05;
425 token->TOK_ID[1] = 0x04;
427 token->Filler = 0xFF;
429 if ((ctx->more_flags & LOCAL) == 0)
430 token->Flags |= CFXSentByAcceptor;
432 if (ctx->more_flags & ACCEPTOR_SUBKEY)
433 token->Flags |= CFXAcceptorSubkey;
434 if (ctx->more_flags & LOCAL)
436 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
438 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
442 * In Wrap tokens with confidentiality, the EC field is
443 * used to encode the size (in bytes) of the random filler.
445 token->Flags |= CFXSealed;
446 token->EC[0] = (ec >> 8) & 0xFF;
447 token->EC[1] = (ec >> 0) & 0xFF;
451 * In Wrap tokens without confidentiality, the EC field is
452 * used to encode the size (in bytes) of the trailing
455 * This is not used in the checksum calcuation itself,
456 * because the checksum length could potentially vary
457 * depending on the data length.
464 * In Wrap tokens that provide for confidentiality, the RRC
465 * field in the header contains the hex value 00 00 before
468 * In Wrap tokens that do not provide for confidentiality,
469 * both the EC and RRC fields in the appended checksum
470 * contain the hex value 00 00 for the purpose of calculating
/* 64-bit sequence number: high 32 bits zero (no 64-bit seq support). */
476 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
477 krb5_auth_con_getlocalseqnumber(context,
480 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
481 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
482 krb5_auth_con_setlocalseqnumber(context,
485 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* +3 slots: krb5 header, E"header"+EC, krb5 trailer. */
487 data = calloc(iov_count + 3, sizeof(data[0]));
489 *minor_status = ENOMEM;
490 major_status = GSS_S_FAILURE;
498 {"header" | encrypt(plaintext-data | ec-padding | E"header")}
500 Expanded, this is with with RRC = 0:
502 {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }
504 In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)
506 {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
/* Slot 0: krb5 crypto header, placed at the end of the GSS header buffer. */
510 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
511 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
512 data[i].data.length = k5hsize;
/* Map each caller IOV buffer onto a krb5_crypto_iov slot. */
514 for (i = 1; i < iov_count + 1; i++) {
515 switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
516 case GSS_IOV_BUFFER_TYPE_DATA:
517 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
519 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
520 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
523 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
526 data[i].data.length = iov[i - 1].buffer.length;
527 data[i].data.data = iov[i - 1].buffer.value;
531 * Any necessary padding is added here to ensure that the
532 * encrypted token header is always at the end of the
536 /* encrypted CFX header in trailer (or after the header if in
537 DCE mode). Copy in header into E"header"
539 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
541 data[i].data.data = trailer->buffer.value;
543 data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);
/* EC filler bytes (0xFF) followed by the inner copy of the token header. */
545 data[i].data.length = ec + sizeof(*token);
546 memset(data[i].data.data, 0xFF, ec);
547 memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
550 /* Kerberos trailer comes after the gss trailer */
551 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
552 data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
553 data[i].data.length = k5tsize;
/* Encrypt everything in place, then publish RRC in the (unprotected) header. */
556 ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
559 major_status = GSS_S_FAILURE;
564 token->RRC[0] = (rrc >> 8) & 0xFF;
565 token->RRC[1] = (rrc >> 0) & 0xFF;
/* Integrity-only layout: checksum over {data | "header"}. */
572 {data | "header" | gss-trailer (krb5 checksum)
578 for (i = 0; i < iov_count; i++) {
579 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
580 case GSS_IOV_BUFFER_TYPE_DATA:
581 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
583 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
584 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
587 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
590 data[i].data.length = iov[i].buffer.length;
591 data[i].data.data = iov[i].buffer.value;
/* The 16-byte token header is signed along with the data. */
594 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
595 data[i].data.data = header->buffer.value;
596 data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
/* Checksum goes in the trailer buffer, or after the header if none. */
599 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
601 data[i].data.data = trailer->buffer.value;
603 data[i].data.data = (uint8_t *)header->buffer.value +
604 sizeof(gss_cfx_wrap_token_desc);
606 data[i].data.length = k5tsize;
609 ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
612 major_status = GSS_S_FAILURE;
617 token->RRC[0] = (rrc >> 8) & 0xFF;
618 token->RRC[1] = (rrc >> 0) & 0xFF;
/* Without sealing, EC carries the checksum length (see RFC 4121 comment above). */
621 token->EC[0] = (k5tsize >> 8) & 0xFF;
622 token->EC[1] = (k5tsize >> 0) & 0xFF;
625 if (conf_state != NULL)
626 *conf_state = conf_req_flag;
631 return GSS_S_COMPLETE;
/* Error path: free any buffers we allocated into the IOV. */
637 gss_release_iov_buffer(&junk, iov, iov_count);
643 /* This is slowpath */
645 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
648 size_t len = 0, skip;
651 for (i = 0; i < iov_count; i++)
652 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
653 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
654 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
655 len += iov[i].buffer.length;
659 *minor_status = ENOMEM;
660 return GSS_S_FAILURE;
666 for (i = 0; i < iov_count; i++) {
667 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
668 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
669 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
671 memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
672 q += iov[i].buffer.length;
675 assert((size_t)(q - p) == len);
677 /* unrotate first part */
680 for (i = 0; i < iov_count; i++) {
681 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
682 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
683 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
685 if (iov[i].buffer.length <= skip) {
686 skip -= iov[i].buffer.length;
688 memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
689 q += iov[i].buffer.length - skip;
697 for (i = 0; i < iov_count; i++) {
698 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
699 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
700 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
702 memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
703 if (iov[i].buffer.length > skip)
705 skip -= iov[i].buffer.length;
706 q += iov[i].buffer.length;
709 return GSS_S_COMPLETE;
/*
 * IOV (scatter/gather) variant of RFC 4121 Unwrap: validate the CFX
 * token header, check sequence ordering, then decrypt (sealed) or
 * verify the checksum (integrity-only) in place.
 *
 * NOTE(review): this listing is an excerpt with embedded original line
 * numbers; many lines are elided between the numbered lines below.
 * Code is left byte-identical; only comments were added.
 */
715 _gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
717 krb5_context context,
719 gss_qop_t *qop_state,
720 gss_iov_buffer_desc *iov,
723 OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
724 gss_iov_buffer_desc *header, *trailer, *padding;
725 gss_cfx_wrap_token token, ttoken;
730 krb5_crypto_iov *data = NULL;
/* Locate and sanity-check the caller-supplied buffers. */
735 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
736 if (header == NULL) {
737 *minor_status = EINVAL;
738 return GSS_S_FAILURE;
741 if (header->buffer.length < sizeof(*token)) /* we check exact below */
742 return GSS_S_DEFECTIVE_TOKEN;
744 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
745 if (padding != NULL && padding->buffer.length != 0) {
746 *minor_status = EINVAL;
747 return GSS_S_FAILURE;
750 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
752 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
753 if (major_status != GSS_S_COMPLETE) {
/* Parse and validate the RFC 4121 token header. */
757 token = (gss_cfx_wrap_token)header->buffer.value;
759 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
760 return GSS_S_DEFECTIVE_TOKEN;
762 /* Ignore unknown flags */
763 token_flags = token->Flags &
764 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
/* Direction check: a token flagged sent-by-acceptor must arrive at the initiator. */
766 if (token_flags & CFXSentByAcceptor) {
767 if ((ctx->more_flags & LOCAL) == 0)
768 return GSS_S_DEFECTIVE_TOKEN;
/* Subkey flag must match the context's acceptor-subkey state exactly. */
771 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
772 if ((token_flags & CFXAcceptorSubkey) == 0)
773 return GSS_S_DEFECTIVE_TOKEN;
775 if (token_flags & CFXAcceptorSubkey)
776 return GSS_S_DEFECTIVE_TOKEN;
779 if (token->Filler != 0xFF)
780 return GSS_S_DEFECTIVE_TOKEN;
782 if (conf_state != NULL)
783 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
/* EC = extra count (filler/checksum size), RRC = right rotation count. */
785 ec = (token->EC[0] << 8) | token->EC[1];
786 rrc = (token->RRC[0] << 8) | token->RRC[1];
789 * Check sequence number
791 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
792 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
794 /* no support for 64-bit sequence numbers */
795 *minor_status = ERANGE;
796 return GSS_S_UNSEQ_TOKEN;
799 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
800 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
803 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
806 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
809 * Decrypt and/or verify checksum
/* Key usage mirrors the sender's: our peer sealed with the opposite role. */
812 if (ctx->more_flags & LOCAL) {
813 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
815 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
818 data = calloc(iov_count + 3, sizeof(data[0]));
820 *minor_status = ENOMEM;
821 major_status = GSS_S_FAILURE;
/* Sealed path: check buffer geometry against the crypto overheads. */
825 if (token_flags & CFXSealed) {
826 size_t k5tsize, k5hsize;
828 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
829 krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
831 /* Rotate by RRC; bogus to do this in-place XXX */
834 if (trailer == NULL) {
835 size_t gsstsize = k5tsize + sizeof(*token);
836 size_t gsshsize = k5hsize + sizeof(*token);
838 if (rrc != gsstsize) {
839 major_status = GSS_S_DEFECTIVE_TOKEN;
843 if (IS_DCE_STYLE(ctx))
846 gsshsize += gsstsize;
848 if (header->buffer.length != gsshsize) {
849 major_status = GSS_S_DEFECTIVE_TOKEN;
852 } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
853 major_status = GSS_S_DEFECTIVE_TOKEN;
855 } else if (header->buffer.length != sizeof(*token) + k5hsize) {
856 major_status = GSS_S_DEFECTIVE_TOKEN;
858 } else if (rrc != 0) {
859 /* go though slowpath */
860 major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
/* Slot 0: krb5 crypto header at the end of the GSS header buffer. */
866 data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
867 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
868 data[i].data.length = k5hsize;
/* Map caller IOV buffers onto krb5_crypto_iov slots. */
871 for (j = 0; j < iov_count; i++, j++) {
872 switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
873 case GSS_IOV_BUFFER_TYPE_DATA:
874 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
876 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
877 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
880 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
883 data[i].data.length = iov[j].buffer.length;
884 data[i].data.data = iov[j].buffer.value;
887 /* encrypted CFX header in trailer (or after the header if in
888 DCE mode). Copy in header into E"header"
890 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
892 data[i].data.data = trailer->buffer.value;
894 data[i].data.data = ((uint8_t *)header->buffer.value) +
895 header->buffer.length - k5hsize - k5tsize - ec- sizeof(*token);
/* ttoken: where the decrypted inner token header will land. */
898 data[i].data.length = ec + sizeof(*token);
899 ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
902 /* Kerberos trailer comes after the gss trailer */
903 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
904 data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
905 data[i].data.length = k5tsize;
908 ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
911 major_status = GSS_S_FAILURE;
/* RRC is outside the encryption; patch it before the constant-time compare. */
915 ttoken->RRC[0] = token->RRC[0];
916 ttoken->RRC[1] = token->RRC[1];
918 /* Check the integrity of the header */
919 if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
920 major_status = GSS_S_BAD_MIC;
/* Integrity-only path: EC carries the checksum length here. */
924 size_t gsstsize = ec;
925 size_t gsshsize = sizeof(*token);
927 if (trailer == NULL) {
929 if (rrc != gsstsize) {
930 *minor_status = EINVAL;
931 major_status = GSS_S_FAILURE;
935 gsshsize += gsstsize;
937 } else if (trailer->buffer.length != gsstsize) {
938 major_status = GSS_S_DEFECTIVE_TOKEN;
940 } else if (rrc != 0) {
942 *minor_status = EINVAL;
943 major_status = GSS_S_FAILURE;
947 if (header->buffer.length != gsshsize) {
948 major_status = GSS_S_DEFECTIVE_TOKEN;
/* Build the IOV for checksum verification over {data | "header"}. */
952 for (i = 0; i < iov_count; i++) {
953 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
954 case GSS_IOV_BUFFER_TYPE_DATA:
955 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
957 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
958 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
961 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
964 data[i].data.length = iov[i].buffer.length;
965 data[i].data.data = iov[i].buffer.value;
968 data[i].flags = KRB5_CRYPTO_TYPE_DATA;
969 data[i].data.data = header->buffer.value;
970 data[i].data.length = sizeof(*token);
973 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
975 data[i].data.data = trailer->buffer.value;
977 data[i].data.data = (uint8_t *)header->buffer.value +
980 data[i].data.length = ec;
983 token = (gss_cfx_wrap_token)header->buffer.value;
989 ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
992 major_status = GSS_S_FAILURE;
997 if (qop_state != NULL) {
998 *qop_state = GSS_C_QOP_DEFAULT;
1004 return GSS_S_COMPLETE;
/* Error path: release anything allocated into the IOV. */
1010 gss_release_iov_buffer(&junk, iov, iov_count);
1012 return major_status;
/*
 * gss_wrap_iov_length() for CFX: compute (but do not fill) the HEADER,
 * PADDING and TRAILER buffer lengths a subsequent _gssapi_wrap_cfx_iov
 * call will need.  Mirrors the sizing logic of the wrap path.
 *
 * NOTE(review): excerpt with embedded original line numbers; some lines
 * are elided.  Code left byte-identical; comments only.
 */
1017 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
1019 krb5_context context,
1023 gss_iov_buffer_desc *iov,
1026 OM_uint32 major_status;
1029 gss_iov_buffer_desc *header = NULL;
1030 gss_iov_buffer_desc *padding = NULL;
1031 gss_iov_buffer_desc *trailer = NULL;
1032 size_t gsshsize = 0;
1033 size_t gsstsize = 0;
1037 GSSAPI_KRB5_INIT (&context);
/* One pass: total the DATA sizes and reject duplicate header/trailer/padding. */
1040 for (size = 0, i = 0; i < iov_count; i++) {
1041 switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
1042 case GSS_IOV_BUFFER_TYPE_EMPTY:
1044 case GSS_IOV_BUFFER_TYPE_DATA:
1045 size += iov[i].buffer.length;
1047 case GSS_IOV_BUFFER_TYPE_HEADER:
1048 if (header != NULL) {
1050 return GSS_S_FAILURE;
1054 case GSS_IOV_BUFFER_TYPE_TRAILER:
1055 if (trailer != NULL) {
1057 return GSS_S_FAILURE;
1061 case GSS_IOV_BUFFER_TYPE_PADDING:
1062 if (padding != NULL) {
1064 return GSS_S_FAILURE;
1068 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
1071 *minor_status = EINVAL;
1072 return GSS_S_FAILURE;
1076 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
1077 if (major_status != GSS_S_COMPLETE) {
1078 return major_status;
/* Confidentiality: same overhead calculation as the wrap path. */
1081 if (conf_req_flag) {
1087 size += sizeof(gss_cfx_wrap_token_desc);
1089 *minor_status = krb5_crypto_length(context, ctx->crypto,
1090 KRB5_CRYPTO_TYPE_HEADER,
1093 return GSS_S_FAILURE;
1095 *minor_status = krb5_crypto_length(context, ctx->crypto,
1096 KRB5_CRYPTO_TYPE_TRAILER,
1099 return GSS_S_FAILURE;
1101 *minor_status = krb5_crypto_length(context, ctx->crypto,
1102 KRB5_CRYPTO_TYPE_PADDING,
1105 return GSS_S_FAILURE;
/* ec filler rounds the size up to the pad base (block size for DCE). */
1108 k5psize = k5pbase - (size % k5pbase);
1113 if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
1114 *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
1117 return GSS_S_FAILURE;
1124 gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
1125 gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
/* Integrity-only: trailer is just the checksum. */
1127 *minor_status = krb5_crypto_length(context, ctx->crypto,
1128 KRB5_CRYPTO_TYPE_CHECKSUM,
1131 return GSS_S_FAILURE;
1133 gsshsize = sizeof(gss_cfx_wrap_token_desc);
/* No trailer buffer: trailer bytes get rotated into the header. */
1137 if (trailer != NULL) {
1138 trailer->buffer.length = gsstsize;
1140 gsshsize += gsstsize;
1143 header->buffer.length = gsshsize;
1146 /* padding is done via EC and is contained in the header or trailer */
1147 padding->buffer.length = 0;
1151 *conf_state = conf_req_flag;
1154 return GSS_S_COMPLETE;
/*
 * Single-buffer RFC 4121 Wrap: allocate the output token, fill in the
 * CFX header, then either encrypt {plaintext | pad | header-copy} or
 * append a checksum over {plaintext | header}, rotating by RRC so the
 * encrypted header/checksum ends up adjacent to the 16-byte header.
 *
 * NOTE(review): excerpt with embedded original line numbers; error
 * branches and some declarations are elided.  Code left byte-identical.
 */
1160 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
1161 const gsskrb5_ctx ctx,
1162 krb5_context context,
1164 const gss_buffer_t input_message_buffer,
1166 gss_buffer_t output_message_buffer)
1168 gss_cfx_wrap_token token;
1169 krb5_error_code ret;
1172 size_t wrapped_len, cksumsize;
1173 uint16_t padlength, rrc = 0;
/* Size the output token and learn checksum/pad sizes up front. */
1177 ret = _gsskrb5cfx_wrap_length_cfx(context,
1178 ctx->crypto, conf_req_flag,
1180 input_message_buffer->length,
1181 &wrapped_len, &cksumsize, &padlength);
1183 *minor_status = ret;
1184 return GSS_S_FAILURE;
1187 /* Always rotate encrypted token (if any) and checksum to header */
1188 rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
1190 output_message_buffer->length = wrapped_len;
1191 output_message_buffer->value = malloc(output_message_buffer->length);
1192 if (output_message_buffer->value == NULL) {
1193 *minor_status = ENOMEM;
1194 return GSS_S_FAILURE;
/* RFC 4121 section 4.2.2 token header. */
1197 p = output_message_buffer->value;
1198 token = (gss_cfx_wrap_token)p;
1199 token->TOK_ID[0] = 0x05;
1200 token->TOK_ID[1] = 0x04;
1202 token->Filler = 0xFF;
1203 if ((ctx->more_flags & LOCAL) == 0)
1204 token->Flags |= CFXSentByAcceptor;
1205 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1206 token->Flags |= CFXAcceptorSubkey;
1207 if (conf_req_flag) {
1209 * In Wrap tokens with confidentiality, the EC field is
1210 * used to encode the size (in bytes) of the random filler.
1212 token->Flags |= CFXSealed;
1213 token->EC[0] = (padlength >> 8) & 0xFF;
1214 token->EC[1] = (padlength >> 0) & 0xFF;
1217 * In Wrap tokens without confidentiality, the EC field is
1218 * used to encode the size (in bytes) of the trailing
1221 * This is not used in the checksum calcuation itself,
1222 * because the checksum length could potentially vary
1223 * depending on the data length.
1230 * In Wrap tokens that provide for confidentiality, the RRC
1231 * field in the header contains the hex value 00 00 before
1234 * In Wrap tokens that do not provide for confidentiality,
1235 * both the EC and RRC fields in the appended checksum
1236 * contain the hex value 00 00 for the purpose of calculating
/* 64-bit sequence number field; high 32 bits are always zero here. */
1242 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1243 krb5_auth_con_getlocalseqnumber(context,
1246 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1247 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1248 krb5_auth_con_setlocalseqnumber(context,
1251 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1254 * If confidentiality is requested, the token header is
1255 * appended to the plaintext before encryption; the resulting
1256 * token is {"header" | encrypt(plaintext | pad | "header")}.
1258 * If no confidentiality is requested, the checksum is
1259 * calculated over the plaintext concatenated with the
1262 if (ctx->more_flags & LOCAL) {
1263 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1265 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
/* Confidentiality path: build plaintext | 0xFF-pad | header-copy, then encrypt. */
1268 if (conf_req_flag) {
1270 * Any necessary padding is added here to ensure that the
1271 * encrypted token header is always at the end of the
1274 * The specification does not require that the padding
1275 * bytes are initialized.
1277 p += sizeof(*token);
1278 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1279 memset(p + input_message_buffer->length, 0xFF, padlength);
1280 memcpy(p + input_message_buffer->length + padlength,
1281 token, sizeof(*token));
1283 ret = krb5_encrypt(context, ctx->crypto,
1285 input_message_buffer->length + padlength +
1289 *minor_status = ret;
1290 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1291 return GSS_S_FAILURE;
1293 assert(sizeof(*token) + cipher.length == wrapped_len);
1294 token->RRC[0] = (rrc >> 8) & 0xFF;
1295 token->RRC[1] = (rrc >> 0) & 0xFF;
1298 * this is really ugly, but needed against windows
1299 * for DCERPC, as windows rotates by EC+RRC.
1301 if (IS_DCE_STYLE(ctx)) {
1302 ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
1304 ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1307 *minor_status = ret;
1308 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1309 return GSS_S_FAILURE;
1311 memcpy(p, cipher.data, cipher.length);
1312 krb5_data_free(&cipher);
/* Integrity-only path: checksum over {plaintext | header} in a temp buffer. */
1317 buf = malloc(input_message_buffer->length + sizeof(*token));
1319 *minor_status = ENOMEM;
1320 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1321 return GSS_S_FAILURE;
1323 memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1324 memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1326 ret = krb5_create_checksum(context, ctx->crypto,
1328 input_message_buffer->length +
1332 *minor_status = ret;
1333 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1335 return GSS_S_FAILURE;
/* Now EC can be filled in with the real checksum length. */
1340 assert(cksum.checksum.length == cksumsize);
1341 token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
1342 token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
1343 token->RRC[0] = (rrc >> 8) & 0xFF;
1344 token->RRC[1] = (rrc >> 0) & 0xFF;
1346 p += sizeof(*token);
1347 memcpy(p, input_message_buffer->value, input_message_buffer->length);
1348 memcpy(p + input_message_buffer->length,
1349 cksum.checksum.data, cksum.checksum.length);
/* Rotate the checksum up next to the header. */
1352 input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1354 *minor_status = ret;
1355 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1356 free_Checksum(&cksum);
1357 return GSS_S_FAILURE;
1359 free_Checksum(&cksum);
1362 if (conf_state != NULL) {
1363 *conf_state = conf_req_flag;
1367 return GSS_S_COMPLETE;
/*
 * Single-buffer RFC 4121 Unwrap: validate the CFX token header and
 * sequencing, undo the RRC rotation, then decrypt (sealed) or verify
 * the appended checksum (integrity-only) and return the plaintext.
 *
 * NOTE(review): excerpt with embedded original line numbers; error
 * branches and some declarations are elided.  Code left byte-identical.
 */
1370 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1371 const gsskrb5_ctx ctx,
1372 krb5_context context,
1373 const gss_buffer_t input_message_buffer,
1374 gss_buffer_t output_message_buffer,
1376 gss_qop_t *qop_state)
1378 gss_cfx_wrap_token token;
1380 krb5_error_code ret;
1384 OM_uint32 seq_number_lo, seq_number_hi;
/* Token must at least hold the fixed 16-byte header. */
1390 if (input_message_buffer->length < sizeof(*token)) {
1391 return GSS_S_DEFECTIVE_TOKEN;
1394 p = input_message_buffer->value;
1396 token = (gss_cfx_wrap_token)p;
1398 if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1399 return GSS_S_DEFECTIVE_TOKEN;
1402 /* Ignore unknown flags */
1403 token_flags = token->Flags &
1404 (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
/* Direction and subkey flags must be consistent with our side of the context. */
1406 if (token_flags & CFXSentByAcceptor) {
1407 if ((ctx->more_flags & LOCAL) == 0)
1408 return GSS_S_DEFECTIVE_TOKEN;
1411 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1412 if ((token_flags & CFXAcceptorSubkey) == 0)
1413 return GSS_S_DEFECTIVE_TOKEN;
1415 if (token_flags & CFXAcceptorSubkey)
1416 return GSS_S_DEFECTIVE_TOKEN;
1419 if (token->Filler != 0xFF) {
1420 return GSS_S_DEFECTIVE_TOKEN;
1423 if (conf_state != NULL) {
1424 *conf_state = (token_flags & CFXSealed) ? 1 : 0;
/* EC = extra count, RRC = right rotation count (both big-endian 16-bit). */
1427 ec = (token->EC[0] << 8) | token->EC[1];
1428 rrc = (token->RRC[0] << 8) | token->RRC[1];
1431 * Check sequence number
1433 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1434 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1435 if (seq_number_hi) {
1436 /* no support for 64-bit sequence numbers */
1437 *minor_status = ERANGE;
1438 return GSS_S_UNSEQ_TOKEN;
1441 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1442 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1445 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1446 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1449 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1452 * Decrypt and/or verify checksum
/* Peer sealed with the opposite role's key usage. */
1455 if (ctx->more_flags & LOCAL) {
1456 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1458 usage = KRB5_KU_USAGE_INITIATOR_SEAL;
/* p/len now describe the token body after the 16-byte header. */
1461 p += sizeof(*token);
1462 len = input_message_buffer->length;
1463 len -= (p - (u_char *)input_message_buffer->value);
1465 if (token_flags & CFXSealed) {
1467 * this is really ugly, but needed against windows
1468 * for DCERPC, as windows rotates by EC+RRC.
1470 if (IS_DCE_STYLE(ctx)) {
1471 *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
1473 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1475 if (*minor_status != 0) {
1476 return GSS_S_FAILURE;
1479 ret = krb5_decrypt(context, ctx->crypto, usage,
1482 *minor_status = ret;
1483 return GSS_S_BAD_MIC;
1486 /* Check that there is room for the pad and token header */
1487 if (data.length < ec + sizeof(*token)) {
1488 krb5_data_free(&data);
1489 return GSS_S_DEFECTIVE_TOKEN;
/* p -> the decrypted inner copy of the token header at the end of the data. */
1492 p += data.length - sizeof(*token);
1494 /* RRC is unprotected; don't modify input buffer */
1495 ((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1496 ((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1498 /* Check the integrity of the header */
1499 if (ct_memcmp(p, token, sizeof(*token)) != 0) {
1500 krb5_data_free(&data);
1501 return GSS_S_BAD_MIC;
/* Plaintext = decrypted data minus EC filler and inner header copy. */
1504 output_message_buffer->value = data.data;
1505 output_message_buffer->length = data.length - ec - sizeof(*token);
/* Integrity-only path. */
1509 /* Rotate by RRC; bogus to do this in-place XXX */
1510 *minor_status = rrc_rotate(p, len, rrc, TRUE);
1511 if (*minor_status != 0) {
1512 return GSS_S_FAILURE;
1515 /* Determine checksum type */
1516 ret = krb5_crypto_get_checksum_type(context,
1520 *minor_status = ret;
1521 return GSS_S_FAILURE;
/* In unsealed tokens EC carries the trailing checksum length. */
1524 cksum.checksum.length = ec;
1526 /* Check we have at least as much data as the checksum */
1527 if (len < cksum.checksum.length) {
1528 *minor_status = ERANGE;
1529 return GSS_S_BAD_MIC;
1532 /* Length now is of the plaintext only, no checksum */
1533 len -= cksum.checksum.length;
1534 cksum.checksum.data = p + len;
1536 output_message_buffer->length = len; /* for later */
1537 output_message_buffer->value = malloc(len + sizeof(*token));
1538 if (output_message_buffer->value == NULL) {
1539 *minor_status = ENOMEM;
1540 return GSS_S_FAILURE;
1543 /* Checksum is over (plaintext-data | "header") */
1544 memcpy(output_message_buffer->value, p, len);
1545 memcpy((u_char *)output_message_buffer->value + len,
1546 token, sizeof(*token));
1548 /* EC is not included in checksum calculation */
1549 token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1556 ret = krb5_verify_checksum(context, ctx->crypto,
1558 output_message_buffer->value,
1559 len + sizeof(*token),
1562 *minor_status = ret;
1563 _gsskrb5_release_buffer(minor_status, output_message_buffer);
1564 return GSS_S_BAD_MIC;
1568 if (qop_state != NULL) {
1569 *qop_state = GSS_C_QOP_DEFAULT;
1573 return GSS_S_COMPLETE;
/*
 * _gssapi_mic_cfx - create an RFC 4121 (CFX) per-message MIC token.
 *
 * Produces message_token = { "header" | get_mic("header" | plaintext-data) }:
 * a 16-byte MIC token header (TOK_ID 0x04 0x04, Flags, five 0xFF filler
 * bytes, 64-bit big-endian sequence number) followed by the keyed checksum
 * computed over (plaintext-data | "header").  Caller owns and must release
 * message_token.  Returns GSS_S_COMPLETE on success; GSS_S_FAILURE with
 * *minor_status set on allocation or checksum failure.
 *
 * NOTE(review): this listing is elided -- several original lines (local
 * declarations, the allocation of `buf`, some braces/else branches and the
 * free of `buf`) are not visible here.  Comments below describe only the
 * visible code; confirm elided parts against the full source.
 */
1576 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1577 const gsskrb5_ctx ctx,
1578 krb5_context context,
1580 const gss_buffer_t message_buffer,
1581 gss_buffer_t message_token)
1583 gss_cfx_mic_token token;
1584 krb5_error_code ret;
/* Checksum input is the message data followed by the 16-byte header. */
1591 len = message_buffer->length + sizeof(*token);
/* (allocation of `buf` for `len` bytes is elided here; this is its
 * out-of-memory failure path) */
1594 *minor_status = ENOMEM;
1595 return GSS_S_FAILURE;
/* Lay out (plaintext-data | "header") in buf for checksumming. */
1598 memcpy(buf, message_buffer->value, message_buffer->length);
1600 token = (gss_cfx_mic_token)(buf + message_buffer->length);
/* TOK_ID 0x04 0x04 identifies a CFX MIC token (RFC 4121 4.2.6.1). */
1601 token->TOK_ID[0] = 0x04;
1602 token->TOK_ID[1] = 0x04;
/* Flag whether we are the acceptor, and whether the acceptor subkey
 * is in use, so the peer selects the right verification key/usage. */
1604 if ((ctx->more_flags & LOCAL) == 0)
1605 token->Flags |= CFXSentByAcceptor;
1606 if (ctx->more_flags & ACCEPTOR_SUBKEY)
1607 token->Flags |= CFXAcceptorSubkey;
/* Five filler bytes, all 0xFF, per RFC 4121. */
1608 memset(token->Filler, 0xFF, 5);
/* Under the context lock: read the local sequence number, encode it
 * big-endian into SND_SEQ (upper 32 bits always zero -- no 64-bit
 * sequence number support), then store an updated value back.
 * (The argument lists of the get/set calls are elided here.) */
1610 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1611 krb5_auth_con_getlocalseqnumber(context,
1614 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
1615 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1616 krb5_auth_con_setlocalseqnumber(context,
1619 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* Key usage depends on our role: initiator signs with INITIATOR_SIGN,
 * acceptor with ACCEPTOR_SIGN (RFC 4121 section 2). */
1621 if (ctx->more_flags & LOCAL) {
1622 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1624 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
/* Checksum over the concatenated (plaintext-data | "header") buffer. */
1627 ret = krb5_create_checksum(context, ctx->crypto,
1628 usage, 0, buf, len, &cksum);
/* (checksum-failure branch; cleanup of `buf` elided) */
1630 *minor_status = ret;
1632 return GSS_S_FAILURE;
1635 /* Determine MIC length */
1636 message_token->length = sizeof(*token) + cksum.checksum.length;
1637 message_token->value = malloc(message_token->length);
1638 if (message_token->value == NULL) {
1639 *minor_status = ENOMEM;
1640 free_Checksum(&cksum);
1642 return GSS_S_FAILURE;
1645 /* Token is { "header" | get_mic("header" | plaintext-data) } */
1646 memcpy(message_token->value, token, sizeof(*token));
1647 memcpy((u_char *)message_token->value + sizeof(*token),
1648 cksum.checksum.data, cksum.checksum.length);
1650 free_Checksum(&cksum);
1654 return GSS_S_COMPLETE;
/*
 * _gssapi_verify_mic_cfx - verify an RFC 4121 (CFX) MIC token.
 *
 * Parses the 16-byte MIC token header, validates TOK_ID/Flags/Filler and
 * the sequence number, then verifies the checksum over
 * (plaintext-data | "header").  Returns GSS_S_COMPLETE on success;
 * GSS_S_DEFECTIVE_TOKEN for malformed tokens, GSS_S_UNSEQ_TOKEN for
 * unsupported 64-bit sequence numbers, GSS_S_BAD_MIC on checksum mismatch,
 * GSS_S_FAILURE on internal errors.
 *
 * NOTE(review): this listing is elided -- local declarations, several
 * braces/else branches, some call argument lists, and the free of `buf`
 * are not visible here.  Comments describe only the visible code.
 */
1657 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1658 const gsskrb5_ctx ctx,
1659 krb5_context context,
1660 const gss_buffer_t message_buffer,
1661 const gss_buffer_t token_buffer,
1662 gss_qop_t *qop_state)
1664 gss_cfx_mic_token token;
1666 krb5_error_code ret;
1668 OM_uint32 seq_number_lo, seq_number_hi;
/* Token must at least hold the 16-byte header. */
1674 if (token_buffer->length < sizeof(*token)) {
1675 return GSS_S_DEFECTIVE_TOKEN;
1678 p = token_buffer->value;
1680 token = (gss_cfx_mic_token)p;
/* TOK_ID for a CFX MIC token is 0x04 0x04 (RFC 4121 4.2.6.1). */
1682 if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1683 return GSS_S_DEFECTIVE_TOKEN;
1686 /* Ignore unknown flags */
1687 token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
/* A token flagged as sent-by-acceptor is only valid if we are the
 * initiator (LOCAL); otherwise the peer is impersonating our role. */
1689 if (token_flags & CFXSentByAcceptor) {
1690 if ((ctx->more_flags & LOCAL) == 0)
1691 return GSS_S_DEFECTIVE_TOKEN;
/* The token's acceptor-subkey flag must agree with the context's
 * ACCEPTOR_SUBKEY state, in both directions. */
1693 if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1694 if ((token_flags & CFXAcceptorSubkey) == 0)
1695 return GSS_S_DEFECTIVE_TOKEN;
1697 if (token_flags & CFXAcceptorSubkey)
1698 return GSS_S_DEFECTIVE_TOKEN;
/* Filler must be five 0xFF bytes; constant-time compare avoids a
 * timing side channel on token contents. */
1701 if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1702 return GSS_S_DEFECTIVE_TOKEN;
1706 * Check sequence number
1708 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1709 _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
/* Upper 32 bits nonzero => 64-bit sequence number, not supported. */
1710 if (seq_number_hi) {
1711 *minor_status = ERANGE;
1712 return GSS_S_UNSEQ_TOKEN;
/* Replay/sequence-order check under the context lock.  (The failure
 * branch between the two unlocks is elided here.) */
1715 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1716 ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1719 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1722 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
/* Determine the checksum type the context's key mandates.
 * (Remaining arguments and the error-check brace are elided.) */
1727 ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1730 *minor_status = ret;
1731 return GSS_S_FAILURE;
/* Checksum bytes are everything in the token after the header. */
1734 cksum.checksum.data = p + sizeof(*token);
1735 cksum.checksum.length = token_buffer->length - sizeof(*token);
/* Mirror of the signer's usage: if we are the initiator (LOCAL), the
 * peer signed as acceptor, and vice versa. */
1737 if (ctx->more_flags & LOCAL) {
1738 usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1740 usage = KRB5_KU_USAGE_INITIATOR_SIGN;
/* Rebuild the signed input (plaintext-data | "header") in a scratch
 * buffer, since the checksum was computed over that concatenation. */
1743 buf = malloc(message_buffer->length + sizeof(*token));
1745 *minor_status = ENOMEM;
1746 return GSS_S_FAILURE;
1748 memcpy(buf, message_buffer->value, message_buffer->length);
1749 memcpy(buf + message_buffer->length, token, sizeof(*token));
/* Verify the MIC.  (Middle arguments and the free(buf) on the failure
 * path are elided in this listing.) */
1751 ret = krb5_verify_checksum(context, ctx->crypto,
1754 sizeof(*token) + message_buffer->length,
1757 *minor_status = ret;
1759 return GSS_S_BAD_MIC;
/* Only the default QOP is supported. */
1764 if (qop_state != NULL) {
1765 *qop_state = GSS_C_QOP_DEFAULT;
1768 return GSS_S_COMPLETE;