author    Stefan Metzmacher <metze@samba.org>  2022-01-19 13:15:45 +0100
committer Joseph Sutton <jsutton@samba.org>    2022-01-19 21:41:59 +0000
commit    7055827b8ffd3823c1240ba3f0b619dd6068cd51 (patch)
tree      abb14aa7455bde7b1b33b706123c57ccfc28fcaa /third_party/heimdal/lib/gssapi/spnego
parent    1954e50f266256c9e153c9613f49f9d9f5dbf67b (diff)
download  samba-7055827b8ffd3823c1240ba3f0b619dd6068cd51.tar.gz
HEIMDAL: move code from source4/heimdal* to third_party/heimdal*
This makes it clearer that we always want to do heimdal changes via the
lorikeet-heimdal repository.

Signed-off-by: Stefan Metzmacher <metze@samba.org>
Reviewed-by: Joseph Sutton <josephsutton@catalyst.net.nz>

Autobuild-User(master): Joseph Sutton <jsutton@samba.org>
Autobuild-Date(master): Wed Jan 19 21:41:59 UTC 2022 on sn-devel-184
Diffstat (limited to 'third_party/heimdal/lib/gssapi/spnego')
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/accept_sec_context.c  1023
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/compat.c                684
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/context_storage.c       492
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/context_stubs.c         578
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/external.c              165
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/init_sec_context.c      841
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/negoex_ctx.c           1037
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/negoex_err.et            25
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/negoex_locl.h           127
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/negoex_util.c          1042
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/spnego.asn1              66
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/spnego.opt                1
-rw-r--r--  third_party/heimdal/lib/gssapi/spnego/spnego_locl.h           159
13 files changed, 6240 insertions, 0 deletions
diff --git a/third_party/heimdal/lib/gssapi/spnego/accept_sec_context.c b/third_party/heimdal/lib/gssapi/spnego/accept_sec_context.c
new file mode 100644
index 00000000000..8cb4211da26
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/accept_sec_context.c
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 1997 - 2006 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * Portions Copyright (c) 2004 PADL Software Pty Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+static OM_uint32
+send_reject (OM_uint32 *minor_status,
+ gss_const_buffer_t mech_token,
+ gss_buffer_t output_token)
+{
+ NegotiationToken nt;
+ size_t size;
+ heim_octet_string responseToken;
+
+ nt.element = choice_NegotiationToken_negTokenResp;
+
+ ALLOC(nt.u.negTokenResp.negState, 1);
+ if (nt.u.negTokenResp.negState == NULL) {
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ *(nt.u.negTokenResp.negState) = reject;
+ nt.u.negTokenResp.supportedMech = NULL;
+ nt.u.negTokenResp.responseToken = NULL;
+
+ if (mech_token != GSS_C_NO_BUFFER && mech_token->value != NULL) {
+ responseToken.length = mech_token->length;
+ responseToken.data = mech_token->value;
+ nt.u.negTokenResp.responseToken = &responseToken;
+ } else
+ nt.u.negTokenResp.responseToken = NULL;
+ nt.u.negTokenResp.mechListMIC = NULL;
+
+ ASN1_MALLOC_ENCODE(NegotiationToken,
+ output_token->value, output_token->length, &nt,
+ &size, *minor_status);
+ nt.u.negTokenResp.responseToken = NULL; /* allocated on stack */
+ free_NegotiationToken(&nt);
+ if (*minor_status != 0)
+ return GSS_S_FAILURE;
+
+ return GSS_S_BAD_MECH;
+}
+
+static OM_uint32
+acceptor_approved(OM_uint32 *minor_status,
+ void *userptr,
+ gss_const_name_t target_name,
+ gss_const_cred_id_t cred_handle,
+ gss_OID mech)
+{
+ gss_cred_id_t cred = GSS_C_NO_CREDENTIAL;
+ gss_OID_set oidset = GSS_C_NO_OID_SET;
+ OM_uint32 junk, ret;
+
+ if (target_name == GSS_C_NO_NAME)
+ return GSS_S_COMPLETE;
+
+ if (gss_oid_equal(mech, GSS_NEGOEX_MECHANISM)) {
+ size_t i;
+
+ ret = _gss_spnego_indicate_mechs(minor_status, &oidset);
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ /* before committing to NegoEx, check we can negotiate a mech */
+ for (i = 0; i < oidset->count; i++) {
+ gss_OID inner_mech = &oidset->elements[i];
+
+ if (_gss_negoex_mech_p(inner_mech)) {
+ ret = acceptor_approved(minor_status, userptr,
+ target_name, cred_handle,
+ inner_mech);
+ if (ret == GSS_S_COMPLETE)
+ break;
+ }
+ }
+ } else if (cred_handle != GSS_C_NO_CREDENTIAL) {
+ ret = gss_inquire_cred_by_mech(minor_status, cred_handle, mech,
+ NULL, NULL, NULL, NULL);
+ } else {
+ ret = gss_create_empty_oid_set(minor_status, &oidset);
+ if (ret == GSS_S_COMPLETE)
+ ret = gss_add_oid_set_member(minor_status, mech, &oidset);
+ if (ret == GSS_S_COMPLETE)
+ ret = gss_acquire_cred(minor_status, target_name,
+ GSS_C_INDEFINITE, oidset,
+ GSS_C_ACCEPT, &cred, NULL, NULL);
+ }
+
+ gss_release_oid_set(&junk, &oidset);
+ gss_release_cred(&junk, &cred);
+
+ return ret;
+}
+
+static OM_uint32
+send_supported_mechs (OM_uint32 *minor_status,
+ gssspnego_ctx ctx,
+ gss_const_cred_id_t acceptor_cred,
+ gss_buffer_t output_token)
+{
+ NegotiationToken2 nt;
+ size_t buf_len = 0;
+ gss_buffer_desc data;
+ OM_uint32 ret;
+
+ memset(&nt, 0, sizeof(nt));
+
+ nt.element = choice_NegotiationToken2_negTokenInit;
+ nt.u.negTokenInit.reqFlags = NULL;
+ nt.u.negTokenInit.mechToken = NULL;
+ nt.u.negTokenInit.negHints = NULL;
+
+ ret = _gss_spnego_indicate_mechtypelist(minor_status, GSS_C_NO_NAME, 0,
+ acceptor_approved, ctx, 1, acceptor_cred,
+ &nt.u.negTokenInit.mechTypes, NULL);
+ if (ret != GSS_S_COMPLETE) {
+ return ret;
+ }
+
+ ALLOC(nt.u.negTokenInit.negHints, 1);
+ if (nt.u.negTokenInit.negHints == NULL) {
+ *minor_status = ENOMEM;
+ free_NegotiationToken2(&nt);
+ return GSS_S_FAILURE;
+ }
+
+ ALLOC(nt.u.negTokenInit.negHints->hintName, 1);
+ if (nt.u.negTokenInit.negHints->hintName == NULL) {
+ *minor_status = ENOMEM;
+ free_NegotiationToken2(&nt);
+ return GSS_S_FAILURE;
+ }
+
+ *nt.u.negTokenInit.negHints->hintName = strdup("not_defined_in_RFC4178@please_ignore");
+ nt.u.negTokenInit.negHints->hintAddress = NULL;
+
+ ASN1_MALLOC_ENCODE(NegotiationToken2,
+ data.value, data.length, &nt, &buf_len, ret);
+ free_NegotiationToken2(&nt);
+ if (ret) {
+ *minor_status = ret;
+ return GSS_S_FAILURE;
+ }
+ if (data.length != buf_len) {
+ abort();
+ UNREACHABLE(return GSS_S_FAILURE);
+ }
+
+ ret = gss_encapsulate_token(&data, GSS_SPNEGO_MECHANISM, output_token);
+
+ free (data.value);
+
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ *minor_status = 0;
+
+ return GSS_S_CONTINUE_NEEDED;
+}
+
+static OM_uint32
+send_accept (OM_uint32 *minor_status,
+ gssspnego_ctx context_handle,
+ int optimistic_mech_ok,
+ gss_buffer_t mech_token,
+ gss_const_OID selected_mech, /* valid on initial response only */
+ gss_buffer_t mech_buf,
+ gss_buffer_t output_token)
+{
+ int initial_response = (selected_mech != GSS_C_NO_OID);
+ NegotiationToken nt;
+ OM_uint32 ret, minor;
+ gss_buffer_desc mech_mic_buf;
+ size_t size;
+
+ memset(&nt, 0, sizeof(nt));
+
+ nt.element = choice_NegotiationToken_negTokenResp;
+
+ ALLOC(nt.u.negTokenResp.negState, 1);
+ if (nt.u.negTokenResp.negState == NULL) {
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ if (context_handle->flags.open) {
+ if (mech_token != GSS_C_NO_BUFFER
+ && mech_token->length != 0
+ && mech_buf != GSS_C_NO_BUFFER)
+ *(nt.u.negTokenResp.negState) = accept_incomplete;
+ else
+ *(nt.u.negTokenResp.negState) = accept_completed;
+ } else {
+ if (initial_response && !optimistic_mech_ok)
+ *(nt.u.negTokenResp.negState) = request_mic;
+ else
+ *(nt.u.negTokenResp.negState) = accept_incomplete;
+ }
+
+ if (initial_response) {
+ ALLOC(nt.u.negTokenResp.supportedMech, 1);
+ if (nt.u.negTokenResp.supportedMech == NULL) {
+ *minor_status = ENOMEM;
+ ret = GSS_S_FAILURE;
+ goto out;
+ }
+
+ ret = der_get_oid(selected_mech->elements,
+ selected_mech->length,
+ nt.u.negTokenResp.supportedMech,
+ NULL);
+ if (ret) {
+ *minor_status = ENOMEM;
+ ret = GSS_S_FAILURE;
+ goto out;
+ }
+
+ _gss_spnego_log_mech("acceptor sending selected mech", selected_mech);
+ } else {
+ nt.u.negTokenResp.supportedMech = NULL;
+ }
+
+ if (mech_token != GSS_C_NO_BUFFER && mech_token->length != 0) {
+ ALLOC(nt.u.negTokenResp.responseToken, 1);
+ if (nt.u.negTokenResp.responseToken == NULL) {
+ *minor_status = ENOMEM;
+ ret = GSS_S_FAILURE;
+ goto out;
+ }
+ nt.u.negTokenResp.responseToken->length = mech_token->length;
+ nt.u.negTokenResp.responseToken->data = mech_token->value;
+ mech_token->length = 0;
+ mech_token->value = NULL;
+ } else {
+ nt.u.negTokenResp.responseToken = NULL;
+ }
+
+ if (mech_buf != GSS_C_NO_BUFFER) {
+ ret = gss_get_mic(minor_status,
+ context_handle->negotiated_ctx_id,
+ 0,
+ mech_buf,
+ &mech_mic_buf);
+ if (ret == GSS_S_COMPLETE) {
+ _gss_spnego_ntlm_reset_crypto(&minor, context_handle, FALSE);
+
+ ALLOC(nt.u.negTokenResp.mechListMIC, 1);
+ if (nt.u.negTokenResp.mechListMIC == NULL) {
+ gss_release_buffer(minor_status, &mech_mic_buf);
+ *minor_status = ENOMEM;
+ ret = GSS_S_FAILURE;
+ goto out;
+ }
+ nt.u.negTokenResp.mechListMIC->length = mech_mic_buf.length;
+ nt.u.negTokenResp.mechListMIC->data = mech_mic_buf.value;
+ } else if (ret == GSS_S_UNAVAILABLE) {
+ nt.u.negTokenResp.mechListMIC = NULL;
+ } else {
+ goto out;
+ }
+
+ } else
+ nt.u.negTokenResp.mechListMIC = NULL;
+
+ ASN1_MALLOC_ENCODE(NegotiationToken,
+ output_token->value, output_token->length,
+ &nt, &size, ret);
+ if (ret) {
+ *minor_status = ENOMEM;
+ ret = GSS_S_FAILURE;
+ goto out;
+ }
+
+ /*
+ * The response should not be encapsulated, because
+ * it is a SubsequentContextToken (note though RFC 1964
+ * specifies encapsulation for all _Kerberos_ tokens).
+ */
+
+ if (*(nt.u.negTokenResp.negState) == accept_completed)
+ ret = GSS_S_COMPLETE;
+ else
+ ret = GSS_S_CONTINUE_NEEDED;
+
+ out:
+ free_NegotiationToken(&nt);
+ return ret;
+}
+
+/*
+ * Return the default acceptor identity based on the local hostname
+ * or the GSSAPI_SPNEGO_NAME environment variable.
+ */
+
+static OM_uint32
+default_acceptor_name(OM_uint32 *minor_status,
+ gss_name_t *namep)
+{
+ OM_uint32 major_status;
+ gss_buffer_desc namebuf;
+ char *str = NULL, *host, hostname[MAXHOSTNAMELEN];
+
+ *namep = GSS_C_NO_NAME;
+
+ host = secure_getenv("GSSAPI_SPNEGO_NAME");
+ if (host == NULL) {
+ int rv;
+
+ if (gethostname(hostname, sizeof(hostname)) != 0) {
+ *minor_status = errno;
+ return GSS_S_FAILURE;
+ }
+
+ rv = asprintf(&str, "host@%s", hostname);
+ if (rv < 0 || str == NULL) {
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ host = str;
+ }
+
+ namebuf.length = strlen(host);
+ namebuf.value = host;
+
+ major_status = gss_import_name(minor_status, &namebuf,
+ GSS_C_NT_HOSTBASED_SERVICE, namep);
+
+ free(str);
+
+ return major_status;
+}
+
+/*
+ * Determine whether the mech in mechType can be negotiated. If the
+ * mech is NegoEx, make NegoEx mechanisms available for negotiation.
+ */
+
+static OM_uint32
+select_mech(OM_uint32 *minor_status,
+ gssspnego_ctx ctx,
+ gss_const_cred_id_t cred,
+ gss_const_OID_set supported_mechs,
+ MechType *mechType,
+ int verify_p, /* set on non-optimistic tokens */
+ gss_const_OID *advertised_mech_p)
+{
+ char mechbuf[64];
+ size_t mech_len;
+ gss_OID_desc oid;
+ gss_OID selected_mech = GSS_C_NO_OID;
+ OM_uint32 ret, junk;
+ int negoex_proposed = FALSE, negoex_selected = FALSE;
+ int includeMSCompatOID = FALSE;
+ size_t i;
+
+ *minor_status = 0;
+ *advertised_mech_p = GSS_C_NO_OID; /* deals with broken MS OID */
+
+ ctx->selected_mech_type = GSS_C_NO_OID;
+
+ ret = der_put_oid ((unsigned char *)mechbuf + sizeof(mechbuf) - 1,
+ sizeof(mechbuf),
+ mechType,
+ &mech_len);
+ if (ret)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ oid.length = (OM_uint32)mech_len;
+ oid.elements = mechbuf + sizeof(mechbuf) - mech_len;
+
+ if (gss_oid_equal(&oid, GSS_NEGOEX_MECHANISM))
+ negoex_proposed = TRUE;
+ else if (gss_oid_equal(&oid, &_gss_spnego_mskrb_mechanism_oid_desc))
+ includeMSCompatOID = TRUE;
+
+ for (i = 0; i < supported_mechs->count; i++) {
+ gss_OID iter = &supported_mechs->elements[i];
+ auth_scheme scheme;
+ int is_negoex_mech = /* mechanism is negotiable under NegoEx */
+ gssspi_query_mechanism_info(&junk, iter, scheme) == GSS_S_COMPLETE;
+
+ if (is_negoex_mech && negoex_proposed) {
+ ret = _gss_negoex_add_auth_mech(minor_status, ctx, iter, scheme);
+ if (ret != GSS_S_COMPLETE)
+ break;
+
+ negoex_selected = TRUE;
+ }
+
+ if (gss_oid_equal(includeMSCompatOID ? GSS_KRB5_MECHANISM : &oid, iter)) {
+ ret = _gss_intern_oid(minor_status, iter, &selected_mech);
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ break;
+ }
+ }
+
+ /* always prefer NegoEx if a mechanism supports both */
+ if (negoex_selected)
+ selected_mech = GSS_NEGOEX_MECHANISM;
+ if (selected_mech == GSS_C_NO_OID)
+ ret = GSS_S_BAD_MECH;
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ heim_assert(!gss_oid_equal(selected_mech, GSS_SPNEGO_MECHANISM),
+ "SPNEGO should not be able to negotiate itself");
+
+ if (verify_p) {
+ gss_name_t name = GSS_C_NO_NAME;
+
+ /*
+ * If we do not have a credential, acquire a default name as a hint
+ * to acceptor_approved() so it can attempt to acquire a default
+ * credential.
+ */
+ if (cred == GSS_C_NO_CREDENTIAL) {
+ ret = default_acceptor_name(minor_status, &name);
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+ }
+
+ ret = acceptor_approved(minor_status, ctx, name, cred, selected_mech);
+
+ gss_release_name(&junk, &name);
+ } else {
+ /* Stash optimistic mech for use by _gss_spnego_require_mechlist_mic() */
+ ret = gss_duplicate_oid(minor_status, &oid, &ctx->preferred_mech_type);
+ }
+
+ if (ret == GSS_S_COMPLETE) {
+ *minor_status = 0;
+
+ *advertised_mech_p = ctx->selected_mech_type = selected_mech;
+
+ /* if the initiator used the broken MS OID, send that instead */
+ if (includeMSCompatOID && gss_oid_equal(selected_mech, GSS_KRB5_MECHANISM))
+ *advertised_mech_p = &_gss_spnego_mskrb_mechanism_oid_desc;
+ }
+
+ return ret;
+}
+
+
+static OM_uint32
+acceptor_complete(OM_uint32 * minor_status,
+ gssspnego_ctx ctx,
+ int *get_mic,
+ gss_buffer_t mech_input_token,
+ gss_buffer_t mech_output_token,
+ heim_octet_string *mic,
+ gss_buffer_t output_token)
+{
+ gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
+ OM_uint32 ret;
+ int verify_mic;
+
+ ctx->flags.require_mic = 1;
+ ctx->flags.safe_omit = _gss_spnego_safe_omit_mechlist_mic(ctx);
+
+ if (ctx->flags.open) {
+ if (mech_input_token == GSS_C_NO_BUFFER) { /* Even/One */
+ verify_mic = 1;
+ *get_mic = 0;
+ } else if (mech_output_token != GSS_C_NO_BUFFER &&
+ mech_output_token->length == 0) { /* Odd */
+ *get_mic = verify_mic = 1;
+ } else { /* Even/One */
+ verify_mic = 0;
+ *get_mic = 1;
+ }
+
+ /*
+ * Change from previous versions: do not generate a MIC if not
+ * necessary. This conforms to RFC4178 s.5 ("if the accepted
+ * mechanism is the most preferred mechanism of both the initiator
+ * and acceptor, then the MIC token exchange... is OPTIONAL"),
+ * and is consistent with MIT and Windows behavior.
+ */
+ if (ctx->flags.safe_omit)
+ *get_mic = 0;
+
+ if (verify_mic && mic == NULL && ctx->flags.safe_omit) {
+ /*
+ * Peer is old and didn't send a MIC while we expected
+ * one, but since it is safe to omit, let's do that
+ */
+ } else if (verify_mic) {
+ ret = _gss_spnego_verify_mechtypes_mic(minor_status, ctx, mic);
+ if (ret) {
+ if (*get_mic)
+ send_reject(minor_status, GSS_C_NO_BUFFER, output_token);
+ if (buf.value)
+ free(buf.value);
+ return ret;
+ }
+ }
+ } else
+ *get_mic = 0;
+
+ return GSS_S_COMPLETE;
+}
+
+/*
+ * Call gss_accept_sec_context() via mechglue or NegoEx, depending on
+ * whether mech_oid is NegoEx.
+ */
+
+static OM_uint32
+mech_accept(OM_uint32 *minor_status,
+ gssspnego_ctx ctx,
+ gss_const_cred_id_t acceptor_cred_handle,
+ gss_const_buffer_t input_token_buffer,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_buffer_t output_token,
+ gss_cred_id_t *delegated_cred_handle)
+{
+ OM_uint32 ret, junk;
+
+ heim_assert(ctx->selected_mech_type != GSS_C_NO_OID,
+ "mech_accept called with no selected mech");
+
+ if (gss_oid_equal(ctx->selected_mech_type, GSS_NEGOEX_MECHANISM)) {
+ ret = _gss_negoex_accept(minor_status,
+ ctx,
+ (gss_cred_id_t)acceptor_cred_handle,
+ input_token_buffer,
+ input_chan_bindings,
+ output_token,
+ delegated_cred_handle);
+ } else {
+ if (ctx->mech_src_name != GSS_C_NO_NAME)
+ gss_release_name(&junk, &ctx->mech_src_name);
+
+ ret = gss_accept_sec_context(minor_status,
+ &ctx->negotiated_ctx_id,
+ acceptor_cred_handle,
+ (gss_buffer_t)input_token_buffer,
+ input_chan_bindings,
+ &ctx->mech_src_name,
+ &ctx->negotiated_mech_type,
+ output_token,
+ &ctx->mech_flags,
+ &ctx->mech_time_rec,
+ delegated_cred_handle);
+ if (GSS_ERROR(ret))
+ gss_mg_collect_error(ctx->negotiated_mech_type, ret, *minor_status);
+ else if (ctx->negotiated_mech_type != GSS_C_NO_OID &&
+ !gss_oid_equal(ctx->negotiated_mech_type, ctx->selected_mech_type))
+ _gss_mg_log(1, "spnego client didn't send the mech they said they would");
+ }
+
+ return ret;
+}
+
+static OM_uint32 GSSAPI_CALLCONV
+acceptor_start
+ (OM_uint32 * minor_status,
+ gss_ctx_id_t * context_handle,
+ gss_const_cred_id_t acceptor_cred_handle,
+ const gss_buffer_t input_token_buffer,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_name_t * src_name,
+ gss_OID * mech_type,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec,
+ gss_cred_id_t *delegated_cred_handle
+ )
+{
+ OM_uint32 ret, junk;
+ NegotiationToken nt;
+ gss_OID_set supported_mechs = GSS_C_NO_OID_SET;
+ size_t size;
+ NegTokenInit *ni;
+ gss_buffer_desc data;
+ gss_buffer_t mech_input_token = GSS_C_NO_BUFFER;
+ gss_buffer_desc mech_output_token;
+ gssspnego_ctx ctx;
+ int get_mic = 0, first_ok = 0, canonical_order;
+ gss_const_OID advertised_mech = GSS_C_NO_OID;
+
+ memset(&nt, 0, sizeof(nt));
+
+ mech_output_token.value = NULL;
+ mech_output_token.length = 0;
+
+ if (input_token_buffer->length == 0)
+ return send_supported_mechs (minor_status, NULL,
+ acceptor_cred_handle, output_token);
+
+ ret = _gss_spnego_alloc_sec_context(minor_status, context_handle);
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ ctx = (gssspnego_ctx)*context_handle;
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+
+ /*
+ * The GSS-API encapsulation is only present on the initial
+ * context token (negTokenInit).
+ */
+ ret = gss_decapsulate_token (input_token_buffer,
+ GSS_SPNEGO_MECHANISM,
+ &data);
+ if (ret)
+ goto out;
+
+ ret = decode_NegotiationToken(data.value, data.length, &nt, &size);
+ gss_release_buffer(minor_status, &data);
+ if (ret) {
+ *minor_status = ret;
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ goto out;
+ }
+ if (nt.element != choice_NegotiationToken_negTokenInit) {
+ *minor_status = 0;
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ goto out;
+ }
+ ni = &nt.u.negTokenInit;
+
+ if (ni->mechTypes.len < 1) {
+ free_NegotiationToken(&nt);
+ *minor_status = 0;
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ goto out;
+ }
+
+ _gss_spnego_log_mechTypes(&ni->mechTypes);
+
+ {
+ MechTypeList mt;
+ int kret;
+
+ mt.len = ni->mechTypes.len;
+ mt.val = ni->mechTypes.val;
+
+ ASN1_MALLOC_ENCODE(MechTypeList,
+ ctx->NegTokenInit_mech_types.value,
+ ctx->NegTokenInit_mech_types.length,
+ &mt, &size, kret);
+ if (kret) {
+ *minor_status = kret;
+ ret = GSS_S_FAILURE;
+ goto out;
+ }
+ }
+
+ if (acceptor_cred_handle != GSS_C_NO_CREDENTIAL)
+ ret = _gss_spnego_inquire_cred_mechs(minor_status,
+ acceptor_cred_handle,
+ &supported_mechs,
+ &canonical_order);
+ else
+ ret = _gss_spnego_indicate_mechs(minor_status, &supported_mechs);
+ if (ret != GSS_S_COMPLETE)
+ goto out;
+
+ /*
+ * First we try the opportunistic token if we have support for it,
+ * First we try the opportunistic token if we have support for it;
+ * we don't try to verify that we have a credential for the token,
+ * gss_accept_sec_context() will (hopefully) tell us that.
+ * If that fails, we fall back to the other proposed mechs below.
+
+ ret = select_mech(minor_status,
+ ctx,
+ acceptor_cred_handle,
+ supported_mechs,
+ &ni->mechTypes.val[0],
+ 0, /* optimistic token */
+ &advertised_mech);
+
+ if (ret == GSS_S_COMPLETE && ni->mechToken != NULL) {
+ gss_buffer_desc ibuf;
+
+ ibuf.length = ni->mechToken->length;
+ ibuf.value = ni->mechToken->data;
+ mech_input_token = &ibuf;
+
+ _gss_spnego_log_mech("acceptor selected opportunistic mech", ctx->selected_mech_type);
+
+ ret = mech_accept(&junk,
+ ctx,
+ acceptor_cred_handle,
+ mech_input_token,
+ input_chan_bindings,
+ &mech_output_token,
+ delegated_cred_handle);
+ if (ret == GSS_S_COMPLETE || ret == GSS_S_CONTINUE_NEEDED) {
+ first_ok = 1;
+ } else {
+ ctx->selected_mech_type = GSS_C_NO_OID;
+ }
+
+ if (ret == GSS_S_COMPLETE) {
+ ret = acceptor_complete(minor_status,
+ ctx,
+ &get_mic,
+ mech_input_token,
+ &mech_output_token,
+ ni->mechListMIC,
+ output_token);
+ if (ret != GSS_S_COMPLETE)
+ goto out;
+
+ ctx->flags.open = 1;
+ }
+ } else {
+ *minor_status = 0;
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ return gss_mg_set_error_string(GSS_C_NO_OID, GSS_S_NO_CONTEXT,
+ *minor_status,
+ "SPNEGO acceptor didn't find a preferred mechanism");
+ }
+
+ /*
+ * If the opportunistic token failed, let's try the other mechs.
+ */
+
+ if (!first_ok) {
+ size_t j;
+
+ /* Call glue layer to find first mech we support */
+ for (j = 1; j < ni->mechTypes.len; ++j) {
+ ret = select_mech(&junk,
+ ctx,
+ acceptor_cred_handle,
+ supported_mechs,
+ &ni->mechTypes.val[j],
+ 1, /* not optimistic token */
+ &advertised_mech);
+ if (ret == GSS_S_COMPLETE) {
+ _gss_spnego_log_mech("acceptor selected non-opportunistic mech",
+ ctx->selected_mech_type);
+ break;
+ }
+ }
+ }
+ if (ctx->selected_mech_type == GSS_C_NO_OID) {
+ heim_assert(ret != GSS_S_COMPLETE, "no oid and no error code?");
+ *minor_status = junk;
+ goto out;
+ }
+
+ /* The initial token always has a response */
+ ret = send_accept(minor_status,
+ ctx,
+ first_ok,
+ &mech_output_token,
+ advertised_mech,
+ get_mic ? &ctx->NegTokenInit_mech_types : NULL,
+ output_token);
+ if (ret)
+ goto out;
+
+out:
+ gss_release_oid_set(&junk, &supported_mechs);
+ if (mech_output_token.value != NULL)
+ gss_release_buffer(&junk, &mech_output_token);
+ free_NegotiationToken(&nt);
+
+
+ if (ret == GSS_S_COMPLETE) {
+ if (src_name != NULL && ctx->mech_src_name != GSS_C_NO_NAME)
+ ret = gss_duplicate_name(minor_status,
+ ctx->mech_src_name,
+ src_name);
+ }
+
+ if (mech_type != NULL)
+ *mech_type = ctx->negotiated_mech_type;
+ if (ret_flags != NULL)
+ *ret_flags = ctx->mech_flags;
+ if (time_rec != NULL)
+ *time_rec = ctx->mech_time_rec;
+
+ if (ret == GSS_S_COMPLETE || ret == GSS_S_CONTINUE_NEEDED) {
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ return ret;
+ }
+
+ _gss_spnego_internal_delete_sec_context(&junk, context_handle,
+ GSS_C_NO_BUFFER);
+
+ return ret;
+}
+
+
+static OM_uint32 GSSAPI_CALLCONV
+acceptor_continue
+ (OM_uint32 * minor_status,
+ gss_ctx_id_t * context_handle,
+ gss_const_cred_id_t acceptor_cred_handle,
+ const gss_buffer_t input_token_buffer,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_name_t * src_name,
+ gss_OID * mech_type,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec,
+ gss_cred_id_t *delegated_cred_handle
+ )
+{
+ OM_uint32 ret, ret2, minor, junk;
+ NegotiationToken nt;
+ size_t nt_len;
+ NegTokenResp *na;
+ unsigned int negState = accept_incomplete;
+ gss_buffer_t mech_input_token = GSS_C_NO_BUFFER;
+ gss_buffer_t mech_output_token = GSS_C_NO_BUFFER;
+ gssspnego_ctx ctx;
+
+ ctx = (gssspnego_ctx)*context_handle;
+
+ /*
+ * The GSS-API encapsulation is only present on the initial
+ * context token (negTokenInit).
+ */
+
+ ret = decode_NegotiationToken(input_token_buffer->value,
+ input_token_buffer->length,
+ &nt, &nt_len);
+ if (ret) {
+ *minor_status = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if (nt.element != choice_NegotiationToken_negTokenResp) {
+ *minor_status = 0;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ na = &nt.u.negTokenResp;
+
+ if (na->negState != NULL) {
+ negState = *(na->negState);
+ }
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+
+ {
+ gss_buffer_desc ibuf, obuf;
+ int get_mic = 0;
+ int require_response;
+
+ if (na->responseToken != NULL) {
+ ibuf.length = na->responseToken->length;
+ ibuf.value = na->responseToken->data;
+ mech_input_token = &ibuf;
+ } else {
+ ibuf.value = NULL;
+ ibuf.length = 0;
+ }
+
+ if (mech_input_token != GSS_C_NO_BUFFER) {
+
+ ret = mech_accept(minor_status,
+ ctx,
+ acceptor_cred_handle,
+ mech_input_token,
+ input_chan_bindings,
+ &obuf,
+ delegated_cred_handle);
+ mech_output_token = &obuf;
+ if (ret != GSS_S_COMPLETE && ret != GSS_S_CONTINUE_NEEDED) {
+ free_NegotiationToken(&nt);
+ send_reject(&junk, mech_output_token, output_token);
+ gss_release_buffer(&junk, mech_output_token);
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ return ret;
+ }
+ if (ret == GSS_S_COMPLETE)
+ ctx->flags.open = 1;
+ } else
+ ret = GSS_S_COMPLETE;
+
+ if (ret == GSS_S_COMPLETE)
+ ret = acceptor_complete(minor_status,
+ ctx,
+ &get_mic,
+ mech_input_token,
+ mech_output_token,
+ na->mechListMIC,
+ output_token);
+
+ if (ctx->mech_flags & GSS_C_DCE_STYLE)
+ require_response = (negState != accept_completed);
+ else
+ require_response = 0;
+
+ /*
+ * Check whether we need to send a result: there should be only
+ * one accept_completed response sent in the entire negotiation
+ */
+ if ((mech_output_token != GSS_C_NO_BUFFER &&
+ mech_output_token->length != 0)
+ || (ctx->flags.open && negState == accept_incomplete)
+ || require_response
+ || get_mic) {
+ ret2 = send_accept (minor_status,
+ ctx,
+ 0, /* ignored on subsequent tokens */
+ mech_output_token,
+ GSS_C_NO_OID,
+ get_mic ? &ctx->NegTokenInit_mech_types : NULL,
+ output_token);
+ if (ret2)
+ goto out;
+ } else
+ ret2 = GSS_S_COMPLETE;
+
+ out:
+ if (ret2 != GSS_S_COMPLETE)
+ ret = ret2;
+ if (mech_output_token != NULL)
+ gss_release_buffer(&minor, mech_output_token);
+ free_NegotiationToken(&nt);
+ }
+
+ if (ret == GSS_S_COMPLETE) {
+ if (src_name != NULL && ctx->mech_src_name != GSS_C_NO_NAME)
+ ret = gss_duplicate_name(minor_status,
+ ctx->mech_src_name,
+ src_name);
+ }
+
+ if (mech_type != NULL)
+ *mech_type = ctx->negotiated_mech_type;
+ if (ret_flags != NULL)
+ *ret_flags = ctx->mech_flags;
+ if (time_rec != NULL)
+ *time_rec = ctx->mech_time_rec;
+
+ if (ret == GSS_S_COMPLETE || ret == GSS_S_CONTINUE_NEEDED) {
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ return ret;
+ }
+
+ _gss_spnego_internal_delete_sec_context(&minor, context_handle,
+ GSS_C_NO_BUFFER);
+
+ return ret;
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_accept_sec_context
+ (OM_uint32 * minor_status,
+ gss_ctx_id_t * context_handle,
+ gss_const_cred_id_t acceptor_cred_handle,
+ const gss_buffer_t input_token_buffer,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_name_t * src_name,
+ gss_OID * mech_type,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec,
+ gss_cred_id_t *delegated_cred_handle
+ )
+{
+ _gss_accept_sec_context_t *func;
+
+ *minor_status = 0;
+
+ output_token->length = 0;
+ output_token->value = NULL;
+
+ if (src_name != NULL)
+ *src_name = GSS_C_NO_NAME;
+ if (mech_type != NULL)
+ *mech_type = GSS_C_NO_OID;
+ if (ret_flags != NULL)
+ *ret_flags = 0;
+ if (time_rec != NULL)
+ *time_rec = 0;
+ if (delegated_cred_handle != NULL)
+ *delegated_cred_handle = GSS_C_NO_CREDENTIAL;
+
+
+ if (*context_handle == GSS_C_NO_CONTEXT)
+ func = acceptor_start;
+ else
+ func = acceptor_continue;
+
+
+ return (*func)(minor_status, context_handle, acceptor_cred_handle,
+ input_token_buffer, input_chan_bindings,
+ src_name, mech_type, output_token, ret_flags,
+ time_rec, delegated_cred_handle);
+}
diff --git a/third_party/heimdal/lib/gssapi/spnego/compat.c b/third_party/heimdal/lib/gssapi/spnego/compat.c
new file mode 100644
index 00000000000..6cfe5526631
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/compat.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (c) 2004, PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Portions Copyright (c) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of PADL Software nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+/*
+ * Apparently Microsoft got the OID wrong, and used
+ * 1.2.840.48018.1.2.2 instead. We need both this and
+ * the correct Kerberos OID here in order to deal with
+ * this. Because this is manifest in SPNEGO only I'd
+ * prefer to deal with this here rather than inside the
+ * Kerberos mechanism.
+ */
+gss_OID_desc _gss_spnego_mskrb_mechanism_oid_desc =
+ {9, rk_UNCONST("\x2a\x86\x48\x82\xf7\x12\x01\x02\x02")};
+
+/*
+ * Allocate a SPNEGO context handle
+ */
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_alloc_sec_context (OM_uint32 * minor_status,
+ gss_ctx_id_t *context_handle)
+{
+ gssspnego_ctx ctx;
+
+ ctx = calloc(1, sizeof(*ctx));
+ if (ctx == NULL) {
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ ctx->NegTokenInit_mech_types.value = NULL;
+ ctx->NegTokenInit_mech_types.length = 0;
+
+ ctx->preferred_mech_type = GSS_C_NO_OID;
+ ctx->selected_mech_type = GSS_C_NO_OID;
+ ctx->negotiated_mech_type = GSS_C_NO_OID;
+
+ ctx->negotiated_ctx_id = GSS_C_NO_CONTEXT;
+
+ ctx->mech_flags = 0;
+ ctx->mech_time_rec = 0;
+ ctx->mech_src_name = GSS_C_NO_NAME;
+
+ ctx->flags.open = 0;
+ ctx->flags.local = 0;
+ ctx->flags.peer_require_mic = 0;
+ ctx->flags.require_mic = 0;
+ ctx->flags.verified_mic = 0;
+
+ HEIMDAL_MUTEX_init(&ctx->ctx_id_mutex);
+
+ ctx->negoex_step = 0;
+ ctx->negoex_transcript = NULL;
+ ctx->negoex_seqnum = 0;
+ HEIM_TAILQ_INIT(&ctx->negoex_mechs);
+ memset(ctx->negoex_conv_id, 0, GUID_LENGTH);
+
+ *context_handle = (gss_ctx_id_t)ctx;
+
+ return GSS_S_COMPLETE;
+}
+
+/*
+ * Free a SPNEGO context handle. The caller must have acquired
+ * the lock before this is called.
+ */
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_internal_delete_sec_context
+ (OM_uint32 *minor_status,
+ gss_ctx_id_t *context_handle,
+ gss_buffer_t output_token
+ )
+{
+ gssspnego_ctx ctx;
+ OM_uint32 ret, minor;
+
+ *minor_status = 0;
+
+ if (context_handle == NULL) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ if (output_token != GSS_C_NO_BUFFER) {
+ output_token->length = 0;
+ output_token->value = NULL;
+ }
+
+ ctx = (gssspnego_ctx)*context_handle;
+ *context_handle = GSS_C_NO_CONTEXT;
+
+ if (ctx == NULL) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ if (ctx->NegTokenInit_mech_types.value)
+ free(ctx->NegTokenInit_mech_types.value);
+
+ ctx->preferred_mech_type = GSS_C_NO_OID;
+ ctx->negotiated_mech_type = GSS_C_NO_OID;
+ ctx->selected_mech_type = GSS_C_NO_OID;
+
+ gss_release_name(&minor, &ctx->target_name);
+ gss_release_name(&minor, &ctx->mech_src_name);
+
+ if (ctx->negotiated_ctx_id != GSS_C_NO_CONTEXT) {
+ ret = gss_delete_sec_context(minor_status,
+ &ctx->negotiated_ctx_id,
+ output_token);
+ ctx->negotiated_ctx_id = GSS_C_NO_CONTEXT;
+ } else {
+ ret = GSS_S_COMPLETE;
+ }
+
+ _gss_negoex_release_context(ctx);
+
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ HEIMDAL_MUTEX_destroy(&ctx->ctx_id_mutex);
+
+ free(ctx);
+
+ return ret;
+}
+
+static int
+inq_context_by_oid_bool(gssspnego_ctx ctx, gss_OID oid)
+{
+ OM_uint32 major, minor;
+ gss_buffer_set_t data_set = GSS_C_NO_BUFFER_SET;
+ uint8_t ret = 0;
+
+ major = gss_inquire_sec_context_by_oid(&minor, ctx->negotiated_ctx_id,
+ oid, &data_set);
+ if (major != GSS_S_COMPLETE)
+ return FALSE;
+
+ if (data_set != GSS_C_NO_BUFFER_SET &&
+ data_set->count == 1 &&
+ data_set->elements[0].length == 1)
+ ret = *((uint8_t *)data_set->elements[0].value);
+
+ gss_release_buffer_set(&minor, &data_set);
+
+ return ret != 0;
+}
+
+/*
+ * Returns TRUE if it is safe to omit mechListMIC.
+ */
+
+int
+_gss_spnego_safe_omit_mechlist_mic(gssspnego_ctx ctx)
+{
+ int safe_omit = FALSE;
+
+ if (ctx->flags.peer_require_mic) {
+ _gss_mg_log(10, "spnego: mechListMIC required by peer");
+ } else if (inq_context_by_oid_bool(ctx, GSS_C_INQ_PEER_HAS_BUGGY_SPNEGO)) {
+ /* [MS-SPNG] Appendix A <7> Section 3.1.5.1: may be old peer with buggy SPNEGO */
+ safe_omit = TRUE;
+ _gss_mg_log(10, "spnego: mechListMIC omitted for legacy interoperability");
+ } else if (inq_context_by_oid_bool(ctx, GSS_C_INQ_REQUIRE_MECHLIST_MIC)) {
+ /* [MS-SPNG] Appendix A <7> Section 3.1.5.1: allow NTLM to force MIC */
+ _gss_mg_log(10, "spnego: mechListMIC required by mechanism");
+ } else if (gss_oid_equal(ctx->selected_mech_type, ctx->preferred_mech_type)) {
+ safe_omit = TRUE;
+ _gss_mg_log(10, "spnego: mechListMIC omitted as preferred mechanism selected");
+ } else {
+ _gss_mg_log(10, "spnego: mechListMIC required by default");
+ }
+
+ return safe_omit;
+}
+
+/*
+ * A map between a GSS-API flag and a (mechanism attribute, weight)
+ * tuple. The list of mechanisms is re-ordered by aggregate weight
+ * (highest weight is more preferred, e.g. if GSS_C_MUTUAL_FLAG and
+ * GSS_C_ANON_FLAG are set, we prefer a mechanism that supports
+ * mutual authentication over one that only supports anonymous).
+ */
+static struct {
+ OM_uint32 flag;
+ gss_OID ma;
+ int weight;
+} flag_to_ma_map[] = {
+ { GSS_C_MUTUAL_FLAG, GSS_C_MA_AUTH_TARG, 2 },
+ { GSS_C_ANON_FLAG, GSS_C_MA_AUTH_INIT_ANON, 1 },
+};
+
+/*
+ * Returns a bitmask indicating GSS flags we can sort on.
+ */
+static inline OM_uint32
+mech_flag_mask(void)
+{
+ size_t i;
+ OM_uint32 mask = 0;
+
+ for (i = 0; i < sizeof(flag_to_ma_map)/sizeof(flag_to_ma_map[0]); i++)
+ mask |= flag_to_ma_map[i].flag;
+
+ return mask;
+}
+
+/*
+ * Returns an integer representing the preference weighting for a
+ * mechanism, based on the requested GSS flags.
+ */
+static int
+mech_weight(gss_const_OID mech, OM_uint32 req_flags)
+{
+ OM_uint32 major, minor;
+ gss_OID_set mech_attrs = GSS_C_NO_OID_SET;
+ int weight = 0;
+ size_t i, j;
+
+ major = gss_inquire_attrs_for_mech(&minor, mech, &mech_attrs, NULL);
+ if (GSS_ERROR(major))
+ return 0;
+
+ for (i = 0; i < sizeof(flag_to_ma_map)/sizeof(flag_to_ma_map[0]); i++) {
+ if ((req_flags & flag_to_ma_map[i].flag) == 0)
+ continue;
+
+ for (j = 0; j < mech_attrs->count; j++) {
+ if (gss_oid_equal(flag_to_ma_map[i].ma, &mech_attrs->elements[j])) {
+ weight += flag_to_ma_map[i].weight;
+ continue;
+ }
+ }
+ }
+
+ gss_release_oid_set(&minor, &mech_attrs);
+
+ return weight;
+}
+
+static int
+mech_compare(const void *mech1, const void *mech2, void *req_flags_p)
+{
+ OM_uint32 req_flags = *((OM_uint32 *)req_flags_p);
+ int mech1_weight = mech_weight(mech1, req_flags);
+ int mech2_weight = mech_weight(mech2, req_flags);
+
+ return mech2_weight - mech1_weight;
+}
+
+/*
+ * Order a list of mechanisms by weight based on requested GSS flags.
+ */
+static void
+order_mechs_by_flags(gss_OID_set mechs, OM_uint32 req_flags)
+{
+ if (req_flags & mech_flag_mask()) { /* skip if flags irrelevant */
+ /*
+ * NB: must be a stable sort to preserve the existing order
+ * of mechanisms that are equally weighted.
+ */
+ mergesort_r(mechs->elements, mechs->count,
+ sizeof(gss_OID_desc), mech_compare, &req_flags);
+ }
+}
+
+static OM_uint32
+add_mech_type(OM_uint32 *minor_status,
+ gss_OID mech_type,
+ MechTypeList *mechtypelist)
+{
+ MechType mech;
+ int ret;
+
+ heim_assert(!gss_oid_equal(mech_type, GSS_SPNEGO_MECHANISM),
+ "SPNEGO mechanism not filtered");
+
+ ret = der_get_oid(mech_type->elements, mech_type->length, &mech, NULL);
+ if (ret == 0) {
+ ret = add_MechTypeList(mechtypelist, &mech);
+ free_MechType(&mech);
+ }
+
+ if (ret) {
+ *minor_status = ret;
+ return GSS_S_FAILURE;
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+static int
+add_mech_if_approved(OM_uint32 *minor_status,
+ gss_const_name_t target_name,
+ OM_uint32 (*func)(OM_uint32 *, void *, gss_const_name_t, gss_const_cred_id_t, gss_OID),
+ void *userptr,
+ int includeMSCompatOID,
+ gss_const_cred_id_t cred_handle,
+ MechTypeList *mechtypelist,
+ gss_OID mech_oid,
+ gss_OID *first_mech,
+ OM_uint32 *first_major,
+ OM_uint32 *first_minor,
+ int *added_negoex)
+{
+ OM_uint32 major, minor;
+
+ /*
+ * Unapproved mechanisms are ignored, but we capture their result
+ * code in case we didn't find any other mechanisms, in which case
+ * we return that to the caller of _gss_spnego_indicate_mechtypelist().
+ */
+ major = (*func)(&minor, userptr, target_name, cred_handle, mech_oid);
+ if (major != GSS_S_COMPLETE) {
+ if (*first_mech == GSS_C_NO_OID) {
+ *first_major = major;
+ *first_minor = minor;
+ }
+ return GSS_S_COMPLETE;
+ }
+
+ if (_gss_negoex_mech_p(mech_oid)) {
+ if (*added_negoex == FALSE) {
+ major = add_mech_type(minor_status, GSS_NEGOEX_MECHANISM, mechtypelist);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ *added_negoex = TRUE;
+ }
+
+ if (*first_mech == GSS_C_NO_OID)
+ *first_mech = GSS_NEGOEX_MECHANISM;
+
+ /* if NegoEx-only mech, we are done */
+ if (!_gss_negoex_and_spnego_mech_p(mech_oid))
+ return GSS_S_COMPLETE;
+ }
+
+ if (includeMSCompatOID && gss_oid_equal(mech_oid, GSS_KRB5_MECHANISM)) {
+ major = add_mech_type(minor_status,
+ &_gss_spnego_mskrb_mechanism_oid_desc,
+ mechtypelist);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+
+ major = add_mech_type(minor_status, mech_oid, mechtypelist);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ if (*first_mech == GSS_C_NO_OID)
+ *first_mech = mech_oid;
+
+ return GSS_S_COMPLETE;
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_indicate_mechtypelist (OM_uint32 *minor_status,
+ gss_const_name_t target_name,
+ OM_uint32 req_flags,
+ OM_uint32 (*func)(OM_uint32 *, void *, gss_const_name_t, gss_const_cred_id_t, gss_OID),
+ void *userptr,
+ int includeMSCompatOID,
+ gss_const_cred_id_t cred_handle,
+ MechTypeList *mechtypelist,
+ gss_OID *preferred_mech)
+{
+ gss_OID_set supported_mechs = GSS_C_NO_OID_SET;
+ gss_OID first_mech = GSS_C_NO_OID;
+ OM_uint32 ret, minor;
+ OM_uint32 first_major = GSS_S_BAD_MECH, first_minor = 0;
+ size_t i;
+ int added_negoex = FALSE, canonical_order = FALSE;
+
+ mechtypelist->len = 0;
+ mechtypelist->val = NULL;
+
+ if (cred_handle != GSS_C_NO_CREDENTIAL)
+ ret = _gss_spnego_inquire_cred_mechs(minor_status, cred_handle,
+ &supported_mechs, &canonical_order);
+ else
+ ret = _gss_spnego_indicate_mechs(minor_status, &supported_mechs);
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ if (!canonical_order)
+ order_mechs_by_flags(supported_mechs, req_flags);
+
+ heim_assert(supported_mechs != GSS_C_NO_OID_SET,
+ "NULL mech set returned by SPNEGO inquire/indicate mechs");
+
+ /*
+ * Previously krb5 was tried explicitly, but now the internal mech
+ * list is reordered so that krb5 is first; this should no longer
+ * be required. This permits an application to specify another
+ * mechanism as preferred over krb5 using gss_set_neg_mechs().
+ */
+ for (i = 0; i < supported_mechs->count; i++) {
+ ret = add_mech_if_approved(minor_status, target_name,
+ func, userptr, includeMSCompatOID,
+ cred_handle, mechtypelist,
+ &supported_mechs->elements[i],
+ &first_mech,
+ &first_major, &first_minor,
+ &added_negoex);
+ if (ret != GSS_S_COMPLETE) {
+ gss_release_oid_set(&minor, &supported_mechs);
+ return ret;
+ }
+ }
+
+ heim_assert(mechtypelist->len == 0 || first_mech != GSS_C_NO_OID,
+ "mechtypelist non-empty but no mech selected");
+
+ if (first_mech != GSS_C_NO_OID)
+ ret = _gss_intern_oid(minor_status, first_mech, &first_mech);
+ else if (GSS_ERROR(first_major)) {
+ ret = first_major;
+ *minor_status = first_minor;
+ } else
+ ret = GSS_S_BAD_MECH;
+
+ if (preferred_mech != NULL)
+ *preferred_mech = first_mech;
+
+ gss_release_oid_set(&minor, &supported_mechs);
+
+ return ret;
+}
+
+/*
+ *
+ */
+
+OM_uint32
+_gss_spnego_verify_mechtypes_mic(OM_uint32 *minor_status,
+ gssspnego_ctx ctx,
+ heim_octet_string *mic)
+{
+ gss_buffer_desc mic_buf;
+ OM_uint32 major_status;
+
+ if (mic == NULL) {
+ *minor_status = 0;
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_DEFECTIVE_TOKEN, 0,
+ "SPNEGO peer failed to send mechListMIC");
+ }
+
+ if (ctx->flags.verified_mic) {
+ /* This doesn't make sense, we've already verified it? */
+ *minor_status = 0;
+ return GSS_S_DUPLICATE_TOKEN;
+ }
+
+ mic_buf.length = mic->length;
+ mic_buf.value = mic->data;
+
+ major_status = gss_verify_mic(minor_status,
+ ctx->negotiated_ctx_id,
+ &ctx->NegTokenInit_mech_types,
+ &mic_buf,
+ NULL);
+ if (major_status == GSS_S_COMPLETE) {
+ _gss_spnego_ntlm_reset_crypto(minor_status, ctx, TRUE);
+ } else if (major_status == GSS_S_UNAVAILABLE) {
+ _gss_mg_log(10, "mech doesn't support MIC, allowing anyway");
+ } else if (major_status) {
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_DEFECTIVE_TOKEN, *minor_status,
+ "SPNEGO peer sent invalid mechListMIC");
+ }
+ ctx->flags.verified_mic = 1;
+
+ *minor_status = 0;
+
+ return GSS_S_COMPLETE;
+}
+
+/*
+ * According to [MS-SPNG] 3.3.5.1 the crypto state for NTLM is reset
+ * before the completed context is returned to the application.
+ */
+
+OM_uint32
+_gss_spnego_ntlm_reset_crypto(OM_uint32 *minor_status,
+ gssspnego_ctx ctx,
+ OM_uint32 verify)
+{
+ if (gss_oid_equal(ctx->negotiated_mech_type, GSS_NTLM_MECHANISM)) {
+ gss_buffer_desc value;
+
+ value.length = sizeof(verify);
+ value.value = &verify;
+
+ return gss_set_sec_context_option(minor_status,
+ &ctx->negotiated_ctx_id,
+ GSS_C_NTLM_RESET_CRYPTO,
+ &value);
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+void
+_gss_spnego_log_mech(const char *prefix, gss_const_OID oid)
+{
+ gss_buffer_desc oidbuf = GSS_C_EMPTY_BUFFER;
+ OM_uint32 junk;
+ const char *name = NULL;
+
+ if (!_gss_mg_log_level(10))
+ return;
+
+ if (oid == GSS_C_NO_OID ||
+ gss_oid_to_str(&junk, (gss_OID)oid, &oidbuf) != GSS_S_COMPLETE) {
+ _gss_mg_log(10, "spnego: %s (null)", prefix);
+ return;
+ }
+
+ if (gss_oid_equal(oid, GSS_NEGOEX_MECHANISM))
+ name = "negoex"; /* not a real mech */
+ else if (gss_oid_equal(oid, &_gss_spnego_mskrb_mechanism_oid_desc))
+ name = "mskrb";
+ else {
+ gssapi_mech_interface m = __gss_get_mechanism(oid);
+ if (m)
+ name = m->gm_name;
+ }
+
+ _gss_mg_log(10, "spnego: %s %s { %.*s }",
+ prefix,
+ name ? name : "unknown",
+ (int)oidbuf.length, (char *)oidbuf.value);
+ gss_release_buffer(&junk, &oidbuf);
+}
+
+void
+_gss_spnego_log_mechTypes(MechTypeList *mechTypes)
+{
+ size_t i;
+ char mechbuf[64];
+ size_t mech_len;
+ gss_OID_desc oid;
+ int ret;
+
+ if (!_gss_mg_log_level(10))
+ return;
+
+ for (i = 0; i < mechTypes->len; i++) {
+ ret = der_put_oid ((unsigned char *)mechbuf + sizeof(mechbuf) - 1,
+ sizeof(mechbuf),
+ &mechTypes->val[i],
+ &mech_len);
+ if (ret)
+ continue;
+
+ oid.length = (OM_uint32)mech_len;
+ oid.elements = mechbuf + sizeof(mechbuf) - mech_len;
+
+ _gss_spnego_log_mech("initiator proposed mech", &oid);
+ }
+}
+
+/*
+ * Indicate mechs negotiable by SPNEGO
+ */
+
+OM_uint32
+_gss_spnego_indicate_mechs(OM_uint32 *minor_status,
+ gss_OID_set *mechs_p)
+{
+ gss_OID_desc oids[3];
+ gss_OID_set_desc except;
+
+ *mechs_p = GSS_C_NO_OID_SET;
+
+ oids[0] = *GSS_C_MA_DEPRECATED;
+ oids[1] = *GSS_C_MA_NOT_DFLT_MECH;
+ oids[2] = *GSS_C_MA_MECH_NEGO;
+
+ except.count = sizeof(oids) / sizeof(oids[0]);
+ except.elements = oids;
+
+ return gss_indicate_mechs_by_attrs(minor_status,
+ GSS_C_NO_OID_SET,
+ &except,
+ GSS_C_NO_OID_SET,
+ mechs_p);
+}
+
+/*
+ * Indicate mechs in cred negotiable by SPNEGO
+ */
+
+OM_uint32
+_gss_spnego_inquire_cred_mechs(OM_uint32 *minor_status,
+ gss_const_cred_id_t cred,
+ gss_OID_set *mechs_p,
+ int *canonical_order)
+{
+ OM_uint32 ret, junk;
+ gss_OID_set cred_mechs = GSS_C_NO_OID_SET;
+ gss_OID_set negotiable_mechs = GSS_C_NO_OID_SET;
+ size_t i;
+
+ *mechs_p = GSS_C_NO_OID_SET;
+ *canonical_order = FALSE;
+
+ heim_assert(cred != GSS_C_NO_CREDENTIAL, "Invalid null credential handle");
+
+ ret = gss_get_neg_mechs(minor_status, cred, &cred_mechs);
+ if (ret == GSS_S_COMPLETE) {
+ *canonical_order = TRUE;
+ } else {
+ ret = gss_inquire_cred(minor_status, cred, NULL, NULL, NULL, &cred_mechs);
+ if (ret != GSS_S_COMPLETE)
+ goto out;
+ }
+
+ heim_assert(cred_mechs != GSS_C_NO_OID_SET && cred_mechs->count > 0,
+ "gss_inquire_cred succeeded but returned no mechanisms");
+
+ ret = _gss_spnego_indicate_mechs(minor_status, &negotiable_mechs);
+ if (ret != GSS_S_COMPLETE)
+ goto out;
+
+ heim_assert(negotiable_mechs != GSS_C_NO_OID_SET,
+ "_gss_spnego_indicate_mechs succeeded but returned null OID set");
+
+ ret = gss_create_empty_oid_set(minor_status, mechs_p);
+ if (ret != GSS_S_COMPLETE)
+ goto out;
+
+ /* Filter credential mechs by negotiable mechs, order by credential mechs */
+ for (i = 0; i < cred_mechs->count; i++) {
+ gss_OID cred_mech = &cred_mechs->elements[i];
+ int present = 0;
+
+ gss_test_oid_set_member(&junk, cred_mech, negotiable_mechs, &present);
+ if (!present)
+ continue;
+
+ ret = gss_add_oid_set_member(minor_status, cred_mech, mechs_p);
+ if (ret != GSS_S_COMPLETE)
+ break;
+ }
+
+out:
+ if (ret != GSS_S_COMPLETE)
+ gss_release_oid_set(&junk, mechs_p);
+ gss_release_oid_set(&junk, &cred_mechs);
+ gss_release_oid_set(&junk, &negotiable_mechs);
+
+ return ret;
+}
+
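For context (not part of this commit): _gss_spnego_inquire_cred_mechs() above prefers gss_get_neg_mechs(), so an acceptor can pin both the set and the order of mechanisms SPNEGO offers by setting them on the credential first (canonical_order is then honoured instead of re-sorting by flags). A minimal sketch follows, assuming Heimdal's GSS_KRB5_MECHANISM constant; gss_set_neg_mechs() is the extension implemented by Heimdal and MIT, and its declaring header may vary between implementations.

#include <gssapi/gssapi.h>
#include <gssapi/gssapi_krb5.h>   /* GSS_KRB5_MECHANISM (Heimdal) */

static OM_uint32
limit_spnego_to_krb5(OM_uint32 *minor, gss_cred_id_t acceptor_cred)
{
    OM_uint32 major, junk;
    gss_OID_set mechs = GSS_C_NO_OID_SET;

    major = gss_create_empty_oid_set(minor, &mechs);
    if (major == GSS_S_COMPLETE)
        major = gss_add_oid_set_member(minor, GSS_KRB5_MECHANISM, &mechs);
    if (major == GSS_S_COMPLETE)
        /* stored on the credential; SPNEGO then offers exactly these mechs */
        major = gss_set_neg_mechs(minor, acceptor_cred, mechs);

    gss_release_oid_set(&junk, &mechs);
    return major;
}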
diff --git a/third_party/heimdal/lib/gssapi/spnego/context_storage.c b/third_party/heimdal/lib/gssapi/spnego/context_storage.c
new file mode 100644
index 00000000000..13e20d723e6
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/context_storage.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2021, PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+#define SC_MECH_TYPES 0x0001
+#define SC_PREFERRED_MECH_TYPE 0x0002
+#define SC_SELECTED_MECH_TYPE 0x0004
+#define SC_NEGOTIATED_MECH_TYPE 0x0008
+#define SC_NEGOTIATED_CTX_ID 0x0010
+#define SC_MECH_FLAGS 0x0020
+#define SC_MECH_TIME_REC 0x0040
+#define SC_MECH_SRC_NAME 0x0080
+#define SC_TARGET_NAME 0x0100
+#define SC_NEGOEX 0x0200
+
+#define SNC_OID 0x01
+#define SNC_MECH_CONTEXT 0x02
+#define SNC_METADATA 0x04
+
+static krb5_error_code
+ret_spnego_context(krb5_storage *sp, gssspnego_ctx *ctxp);
+static krb5_error_code
+store_spnego_context(krb5_storage *sp, gssspnego_ctx ctx);
+
+static krb5_error_code
+ret_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech **mechp);
+static krb5_error_code
+store_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech *mech);
+
+static uint16_t
+spnego_flags_to_int(struct spnego_flags flags);
+static struct spnego_flags
+int_to_spnego_flags(uint16_t f);
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_import_sec_context_internal(OM_uint32 *minor,
+ gss_const_buffer_t buffer,
+ gssspnego_ctx *ctxp)
+{
+ krb5_error_code ret;
+ krb5_storage *sp;
+
+ sp = krb5_storage_from_readonly_mem(buffer->value, buffer->length);
+ if (sp == NULL) {
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ krb5_storage_set_byteorder(sp, KRB5_STORAGE_BYTEORDER_PACKED);
+
+ ret = ret_spnego_context(sp, ctxp);
+
+ krb5_storage_free(sp);
+
+ *minor = ret;
+ return ret ? GSS_S_FAILURE : GSS_S_COMPLETE;
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_export_sec_context_internal(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_buffer_t buffer)
+{
+ krb5_error_code ret;
+ krb5_storage *sp;
+ krb5_data data;
+
+ sp = krb5_storage_emem();
+ if (sp == NULL) {
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ krb5_data_zero(&data);
+
+ krb5_storage_set_byteorder(sp, KRB5_STORAGE_BYTEORDER_PACKED);
+
+ ret = store_spnego_context(sp, ctx);
+ if (ret == 0)
+ ret = krb5_storage_to_data(sp, &data);
+ if (ret == 0) {
+ buffer->length = data.length;
+ buffer->value = data.data;
+ }
+
+ krb5_storage_free(sp);
+
+ *minor = ret;
+ return ret ? GSS_S_FAILURE : GSS_S_COMPLETE;
+}
+
+static krb5_error_code
+ret_spnego_context(krb5_storage *sp, gssspnego_ctx *ctxp)
+{
+ OM_uint32 major = GSS_S_COMPLETE, minor;
+ gssspnego_ctx ctx = NULL;
+ krb5_error_code ret = 0;
+ krb5_data data;
+ gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
+ uint16_t sc_flags, spnego_flags;
+
+ *ctxp = NULL;
+ krb5_data_zero(&data);
+
+ CHECK(major, _gss_spnego_alloc_sec_context(&minor, (gss_ctx_id_t *)&ctx));
+
+ CHECK(ret, krb5_ret_uint16(sp, &sc_flags));
+ CHECK(ret, krb5_ret_uint16(sp, &spnego_flags));
+ ctx->flags = int_to_spnego_flags(spnego_flags);
+
+ if (sc_flags & SC_MECH_TYPES)
+ CHECK(major, _gss_mg_ret_buffer(&minor, sp, &ctx->NegTokenInit_mech_types));
+ if (sc_flags & SC_PREFERRED_MECH_TYPE)
+ CHECK(major, _gss_mg_ret_oid(&minor, sp, &ctx->preferred_mech_type));
+ if (sc_flags & SC_SELECTED_MECH_TYPE)
+ CHECK(major, _gss_mg_ret_oid(&minor, sp, &ctx->selected_mech_type));
+ if (sc_flags & SC_NEGOTIATED_MECH_TYPE)
+ CHECK(major, _gss_mg_ret_oid(&minor, sp, &ctx->negotiated_mech_type));
+
+ if (sc_flags & SC_NEGOTIATED_CTX_ID) {
+ CHECK(major, _gss_mg_ret_buffer(&minor, sp, &buf));
+ CHECK(major, gss_import_sec_context(&minor, &buf,
+ &ctx->negotiated_ctx_id));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (sc_flags & SC_MECH_FLAGS)
+ CHECK(ret, krb5_ret_uint32(sp, &ctx->mech_flags));
+ if (sc_flags & SC_MECH_TIME_REC)
+ CHECK(ret, krb5_ret_uint32(sp, &ctx->mech_time_rec));
+ else
+ ctx->mech_time_rec = GSS_C_INDEFINITE;
+
+ if (sc_flags & SC_MECH_SRC_NAME) {
+ CHECK(major, _gss_mg_ret_buffer(&minor, sp, &buf));
+ CHECK(major, gss_import_name(&minor, &buf, GSS_C_NT_EXPORT_NAME,
+ &ctx->mech_src_name));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (sc_flags & SC_TARGET_NAME) {
+ CHECK(major, _gss_mg_ret_buffer(&minor, sp, &buf));
+ CHECK(major, gss_import_name(&minor, &buf, GSS_C_NT_EXPORT_NAME,
+ &ctx->target_name));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (sc_flags & SC_NEGOEX) {
+ uint8_t i, nschemes;
+
+ CHECK(ret, krb5_ret_uint8(sp, &ctx->negoex_step));
+
+ CHECK(ret, krb5_ret_data(sp, &data));
+ ctx->negoex_transcript = krb5_storage_emem();
+ if (ctx->negoex_transcript == NULL) {
+ ret = ENOMEM;
+ goto fail;
+ }
+
+ krb5_storage_set_byteorder(ctx->negoex_transcript,
+ KRB5_STORAGE_BYTEORDER_LE);
+ if (krb5_storage_write(ctx->negoex_transcript,
+ data.data, data.length) != data.length) {
+ ret = ENOMEM;
+ goto fail;
+ }
+ krb5_data_free(&data);
+
+ CHECK(ret, krb5_ret_uint32(sp, &ctx->negoex_seqnum));
+
+ if (krb5_storage_read(sp, ctx->negoex_conv_id,
+ GUID_LENGTH) != GUID_LENGTH) {
+ ret = KRB5_BAD_MSIZE;
+ goto fail;
+ }
+
+ CHECK(ret, krb5_ret_uint8(sp, &nschemes));
+ for (i = 0; i < nschemes; i++) {
+ struct negoex_auth_mech *mech;
+
+ CHECK(ret, ret_negoex_auth_mech(sp, &mech));
+ HEIM_TAILQ_INSERT_TAIL(&ctx->negoex_mechs, mech, links);
+ }
+ }
+
+ *ctxp = ctx;
+
+fail:
+ if (ret == 0 && GSS_ERROR(major))
+ ret = minor ? minor : KRB5_BAD_MSIZE;
+ if (ret)
+ _gss_spnego_delete_sec_context(&minor, (gss_ctx_id_t *)&ctx,
+ GSS_C_NO_BUFFER);
+ krb5_data_free(&data);
+ gss_release_buffer(&minor, &buf);
+
+ return ret;
+}
+
+static krb5_error_code
+store_spnego_context(krb5_storage *sp, gssspnego_ctx ctx)
+{
+ OM_uint32 major = GSS_S_COMPLETE, minor;
+ krb5_error_code ret = 0;
+ krb5_data data;
+ gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
+ uint16_t sc_flags = 0, spnego_flags;
+
+ krb5_data_zero(&data);
+
+ if (ctx->NegTokenInit_mech_types.length)
+ sc_flags |= SC_MECH_TYPES;
+ if (ctx->preferred_mech_type)
+ sc_flags |= SC_PREFERRED_MECH_TYPE;
+ if (ctx->selected_mech_type)
+ sc_flags |= SC_SELECTED_MECH_TYPE;
+ if (ctx->negotiated_mech_type)
+ sc_flags |= SC_NEGOTIATED_MECH_TYPE;
+ if (ctx->negotiated_ctx_id)
+ sc_flags |= SC_NEGOTIATED_CTX_ID;
+ if (ctx->mech_flags)
+ sc_flags |= SC_MECH_FLAGS;
+ if (ctx->mech_time_rec != GSS_C_INDEFINITE)
+ sc_flags |= SC_MECH_TIME_REC;
+ if (ctx->mech_src_name)
+ sc_flags |= SC_MECH_SRC_NAME;
+ if (ctx->target_name)
+ sc_flags |= SC_TARGET_NAME;
+ if (ctx->negoex_step)
+ sc_flags |= SC_NEGOEX;
+
+ CHECK(ret, krb5_store_uint16(sp, sc_flags));
+ spnego_flags = spnego_flags_to_int(ctx->flags);
+ CHECK(ret, krb5_store_uint16(sp, spnego_flags));
+
+ if (sc_flags & SC_MECH_TYPES)
+ CHECK(major, _gss_mg_store_buffer(&minor, sp, &ctx->NegTokenInit_mech_types));
+ if (sc_flags & SC_PREFERRED_MECH_TYPE)
+ CHECK(major, _gss_mg_store_oid(&minor, sp, ctx->preferred_mech_type));
+ if (sc_flags & SC_SELECTED_MECH_TYPE)
+ CHECK(major, _gss_mg_store_oid(&minor, sp, ctx->selected_mech_type));
+ if (sc_flags & SC_NEGOTIATED_MECH_TYPE)
+ CHECK(major, _gss_mg_store_oid(&minor, sp, ctx->negotiated_mech_type));
+ if (sc_flags & SC_NEGOTIATED_CTX_ID) {
+ CHECK(major, gss_export_sec_context(&minor, &ctx->negotiated_ctx_id,
+ &buf));
+ CHECK(major, _gss_mg_store_buffer(&minor, sp, &buf));
+ gss_release_buffer(&minor, &buf);
+ }
+ if (sc_flags & SC_MECH_FLAGS)
+ CHECK(ret, krb5_store_uint32(sp, ctx->mech_flags));
+ if (sc_flags & SC_MECH_TIME_REC)
+ CHECK(ret, krb5_store_uint32(sp, ctx->mech_time_rec));
+ if (sc_flags & SC_MECH_SRC_NAME) {
+ CHECK(major, gss_export_name(&minor, ctx->mech_src_name, &buf));
+ CHECK(major, _gss_mg_store_buffer(&minor, sp, &buf));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (sc_flags & SC_TARGET_NAME) {
+ CHECK(major, gss_export_name(&minor, ctx->target_name, &buf));
+ CHECK(major, _gss_mg_store_buffer(&minor, sp, &buf));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (sc_flags & SC_NEGOEX) {
+ uint32_t nschemes;
+ struct negoex_auth_mech *mech;
+
+ CHECK(ret, krb5_store_uint8(sp, ctx->negoex_step));
+
+ if (ctx->negoex_transcript) {
+ CHECK(ret, krb5_storage_to_data(ctx->negoex_transcript, &data));
+ }
+ CHECK(ret, krb5_store_data(sp, data));
+ krb5_data_free(&data);
+
+ CHECK(ret, krb5_store_uint32(sp, ctx->negoex_seqnum));
+ CHECK(ret, krb5_store_bytes(sp, ctx->negoex_conv_id, GUID_LENGTH));
+
+ nschemes = 0;
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
+ nschemes++;
+
+ if (nschemes > 0xff) {
+ ret = ERANGE;
+ goto fail;
+ }
+ CHECK(ret, krb5_store_uint8(sp, nschemes));
+
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
+ CHECK(ret, store_negoex_auth_mech(sp, mech));
+ }
+
+fail:
+ if (ret == 0 && GSS_ERROR(major))
+ ret = minor ? minor : KRB5_BAD_MSIZE;
+ krb5_data_free(&data);
+ gss_release_buffer(&minor, &buf);
+
+ return ret;
+}
+
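+/*
+ * Per-mechanism record layout, as produced by store_negoex_auth_mech()
+ * below and consumed here: one byte of SNC_* presence flags, one byte of
+ * mech state flags (bit 0 = complete, bit 1 = sent_checksum, bit 2 =
+ * verified_checksum), an optional OID if SNC_OID is set, the
+ * GUID_LENGTH-byte auth scheme, an optional exported mech context if
+ * SNC_MECH_CONTEXT is set, and an optional metadata buffer if
+ * SNC_METADATA is set.
+ */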
+static krb5_error_code
+ret_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech **mechp)
+{
+ krb5_error_code ret;
+ OM_uint32 major = GSS_S_COMPLETE, minor;
+ gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
+ struct negoex_auth_mech *mech;
+ krb5_context context = _gss_mg_krb5_context();
+ uint8_t snc_flags, negoex_flags;
+
+ *mechp = NULL;
+
+ mech = calloc(1, sizeof(*mech));
+ if (mech == NULL) {
+ ret = ENOMEM;
+ goto fail;
+ }
+
+ CHECK(ret, krb5_ret_uint8(sp, &snc_flags));
+ CHECK(ret, krb5_ret_uint8(sp, &negoex_flags));
+ if (negoex_flags & (1 << 0))
+ mech->complete = 1;
+ if (negoex_flags & (1 << 1))
+ mech->sent_checksum = 1;
+ if (negoex_flags & (1 << 2))
+ mech->verified_checksum = 1;
+
+ if (snc_flags & SNC_OID)
+ CHECK(major, _gss_mg_ret_oid(&minor, sp, &mech->oid));
+
+ if (krb5_storage_read(sp, mech->scheme, GUID_LENGTH) != GUID_LENGTH) {
+ ret = KRB5_BAD_MSIZE;
+ goto fail;
+ }
+
+ if (snc_flags & SNC_MECH_CONTEXT) {
+ CHECK(major, _gss_mg_ret_buffer(&minor, sp, &buf));
+ CHECK(major, gss_import_sec_context(&minor, &buf,
+ &mech->mech_context));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (snc_flags & SNC_METADATA)
+ CHECK(major, _gss_mg_ret_buffer(&minor, sp, &mech->metadata));
+
+ *mechp = mech;
+
+fail:
+ if (ret == 0 && GSS_ERROR(major))
+ ret = minor ? minor : KRB5_BAD_MSIZE;
+ if (ret)
+ _gss_negoex_release_auth_mech(context, mech);
+ gss_release_buffer(&minor, &buf);
+
+ return ret;
+}
+
+static krb5_error_code
+store_negoex_auth_mech(krb5_storage *sp, struct negoex_auth_mech *mech)
+{
+ krb5_error_code ret;
+ OM_uint32 major = GSS_S_COMPLETE, minor;
+ gss_buffer_desc buf = GSS_C_EMPTY_BUFFER;
+ uint8_t negoex_flags = 0, snc_flags = 0;
+
+ negoex_flags = 0;
+ if (mech->complete)
+ negoex_flags |= (1 << 0);
+ if (mech->sent_checksum)
+ negoex_flags |= (1 << 1);
+ if (mech->verified_checksum)
+ negoex_flags |= (1 << 2);
+
+ if (mech->oid)
+ snc_flags |= SNC_OID;
+ if (mech->mech_context)
+ snc_flags |= SNC_MECH_CONTEXT;
+ if (mech->metadata.length)
+ snc_flags |= SNC_METADATA;
+
+ CHECK(ret, krb5_store_uint8(sp, snc_flags));
+ CHECK(ret, krb5_store_uint8(sp, negoex_flags));
+
+ if (snc_flags & SNC_OID)
+ CHECK(major, _gss_mg_store_oid(&minor, sp, mech->oid));
+
+ CHECK(ret, krb5_store_bytes(sp, mech->scheme, GUID_LENGTH));
+
+ if (snc_flags & SNC_MECH_CONTEXT) {
+ CHECK(major, gss_export_sec_context(&minor, &mech->mech_context,
+ &buf));
+ CHECK(major, _gss_mg_store_buffer(&minor, sp, &buf));
+ gss_release_buffer(&minor, &buf);
+ }
+
+ if (snc_flags & SNC_METADATA)
+ CHECK(major, _gss_mg_store_buffer(&minor, sp, &mech->metadata));
+
+fail:
+ if (ret == 0 && GSS_ERROR(major))
+ ret = minor ? minor : KRB5_BAD_MSIZE;
+ gss_release_buffer(&minor, &buf);
+
+ return ret;
+}
+
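+/*
+ * Bit layout of the serialized flags word produced by spnego_flags_to_int()
+ * and consumed by int_to_spnego_flags(): bit 0 = open, 1 = local,
+ * 2 = require_mic, 3 = peer_require_mic, 4 = sent_mic, 5 = verified_mic,
+ * 6 = safe_omit, 7 = maybe_open, 8 = seen_supported_mech.
+ */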
+static uint16_t
+spnego_flags_to_int(struct spnego_flags flags)
+{
+ uint16_t f = 0;
+
+ if (flags.open)
+ f |= (1 << 0);
+ if (flags.local)
+ f |= (1 << 1);
+ if (flags.require_mic)
+ f |= (1 << 2);
+ if (flags.peer_require_mic)
+ f |= (1 << 3);
+ if (flags.sent_mic)
+ f |= (1 << 4);
+ if (flags.verified_mic)
+ f |= (1 << 5);
+ if (flags.safe_omit)
+ f |= (1 << 6);
+ if (flags.maybe_open)
+ f |= (1 << 7);
+ if (flags.seen_supported_mech)
+ f |= (1 << 8);
+
+ return f;
+}
+
+static struct spnego_flags
+int_to_spnego_flags(uint16_t f)
+{
+ struct spnego_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ if (f & (1 << 0))
+ flags.open = 1;
+ if (f & (1 << 1))
+ flags.local = 1;
+ if (f & (1 << 2))
+ flags.require_mic = 1;
+ if (f & (1 << 3))
+ flags.peer_require_mic = 1;
+ if (f & (1 << 4))
+ flags.sent_mic = 1;
+ if (f & (1 << 5))
+ flags.verified_mic = 1;
+ if (f & (1 << 6))
+ flags.safe_omit = 1;
+ if (f & (1 << 7))
+ flags.maybe_open = 1;
+ if (f & (1 << 8))
+ flags.seen_supported_mech = 1;
+
+ return flags;
+}
diff --git a/third_party/heimdal/lib/gssapi/spnego/context_stubs.c b/third_party/heimdal/lib/gssapi/spnego/context_stubs.c
new file mode 100644
index 00000000000..638e90d7ba3
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/context_stubs.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2004, PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of PADL Software nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_process_context_token
+ (OM_uint32 *minor_status,
+ gss_const_ctx_id_t context_handle,
+ const gss_buffer_t token_buffer
+ )
+{
+ gss_ctx_id_t context;
+ gssspnego_ctx ctx;
+ OM_uint32 ret;
+
+ if (context_handle == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ context = (gss_ctx_id_t)context_handle;
+ ctx = (gssspnego_ctx)context_handle;
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+
+ ret = gss_process_context_token(minor_status,
+ ctx->negotiated_ctx_id,
+ token_buffer);
+ if (ret != GSS_S_COMPLETE) {
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+ return ret;
+ }
+
+ ctx->negotiated_ctx_id = GSS_C_NO_CONTEXT;
+
+ return _gss_spnego_internal_delete_sec_context(minor_status,
+ &context,
+ GSS_C_NO_BUFFER);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_delete_sec_context
+ (OM_uint32 *minor_status,
+ gss_ctx_id_t *context_handle,
+ gss_buffer_t output_token
+ )
+{
+ gssspnego_ctx ctx;
+
+ if (context_handle == NULL || *context_handle == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ ctx = (gssspnego_ctx)*context_handle;
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+
+ return _gss_spnego_internal_delete_sec_context(minor_status,
+ context_handle,
+ output_token);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_context_time
+ (OM_uint32 *minor_status,
+ gss_const_ctx_id_t context_handle,
+ OM_uint32 *time_rec
+ )
+{
+ gssspnego_ctx ctx;
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_context_time(minor_status,
+ ctx->negotiated_ctx_id,
+ time_rec);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_get_mic
+ (OM_uint32 *minor_status,
+ gss_const_ctx_id_t context_handle,
+ gss_qop_t qop_req,
+ const gss_buffer_t message_buffer,
+ gss_buffer_t message_token
+ )
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_get_mic(minor_status, ctx->negotiated_ctx_id,
+ qop_req, message_buffer, message_token);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_verify_mic
+ (OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ const gss_buffer_t message_buffer,
+ const gss_buffer_t token_buffer,
+ gss_qop_t * qop_state
+ )
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_verify_mic(minor_status,
+ ctx->negotiated_ctx_id,
+ message_buffer,
+ token_buffer,
+ qop_state);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_wrap
+ (OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ const gss_buffer_t input_message_buffer,
+ int * conf_state,
+ gss_buffer_t output_message_buffer
+ )
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_wrap(minor_status,
+ ctx->negotiated_ctx_id,
+ conf_req_flag,
+ qop_req,
+ input_message_buffer,
+ conf_state,
+ output_message_buffer);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_unwrap
+ (OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ const gss_buffer_t input_message_buffer,
+ gss_buffer_t output_message_buffer,
+ int * conf_state,
+ gss_qop_t * qop_state
+ )
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_unwrap(minor_status,
+ ctx->negotiated_ctx_id,
+ input_message_buffer,
+ output_message_buffer,
+ conf_state,
+ qop_state);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_inquire_context (
+ OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ gss_name_t * src_name,
+ gss_name_t * targ_name,
+ OM_uint32 * lifetime_rec,
+ gss_OID * mech_type,
+ OM_uint32 * ctx_flags,
+ int * locally_initiated,
+ int * open_context
+ )
+{
+ gssspnego_ctx ctx;
+ OM_uint32 maj_stat;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ maj_stat = gss_inquire_context(minor_status,
+ ctx->negotiated_ctx_id,
+ src_name,
+ targ_name,
+ lifetime_rec,
+ mech_type,
+ ctx_flags,
+ locally_initiated,
+ open_context);
+
+ if (open_context)
+ *open_context = gssspnego_ctx_complete_p(ctx);
+
+ return maj_stat;
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_wrap_size_limit (
+ OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ OM_uint32 req_output_size,
+ OM_uint32 * max_input_size
+ )
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_wrap_size_limit(minor_status,
+ ctx->negotiated_ctx_id,
+ conf_req_flag,
+ qop_req,
+ req_output_size,
+ max_input_size);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_export_sec_context (
+ OM_uint32 * minor_status,
+ gss_ctx_id_t * context_handle,
+ gss_buffer_t interprocess_token
+ )
+{
+ gssspnego_ctx ctx;
+ OM_uint32 major_status;
+
+ *minor_status = 0;
+
+ if (context_handle == NULL)
+ return GSS_S_NO_CONTEXT;
+
+ ctx = (gssspnego_ctx)*context_handle;
+
+ if (ctx == NULL)
+ return GSS_S_NO_CONTEXT;
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+
+ /*
+ * Partial context export is only supported on the acceptor side, as we
+ * cannot represent the initiator function pointer state in an exported
+ * token, and also because it is mostly useful for acceptors which need
+ * to manage multiple initiator states.
+ */
+ if (ctx->flags.local && !gssspnego_ctx_complete_p(ctx)) {
+ major_status = GSS_S_NO_CONTEXT;
+ goto out;
+ }
+
+ major_status = _gss_spnego_export_sec_context_internal(minor_status,
+ ctx,
+ interprocess_token);
+
+out:
+ if (major_status == GSS_S_COMPLETE)
+ major_status = _gss_spnego_internal_delete_sec_context(minor_status,
+ context_handle,
+ GSS_C_NO_BUFFER);
+ else
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+
+ return major_status;
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_import_sec_context (
+ OM_uint32 * minor_status,
+ const gss_buffer_t interprocess_token,
+ gss_ctx_id_t *context_handle
+ )
+{
+ return _gss_spnego_import_sec_context_internal(minor_status,
+ interprocess_token,
+ (gssspnego_ctx *)context_handle);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_inquire_names_for_mech (
+ OM_uint32 * minor_status,
+ const gss_OID mechanism,
+ gss_OID_set * name_types
+ )
+{
+ gss_OID_set mechs, names, n;
+ OM_uint32 ret, junk;
+ size_t i, j;
+
+ *name_types = NULL;
+
+ ret = _gss_spnego_indicate_mechs(minor_status, &mechs);
+ if (ret != GSS_S_COMPLETE)
+ return ret;
+
+ ret = gss_create_empty_oid_set(minor_status, &names);
+ if (ret != GSS_S_COMPLETE)
+ goto out;
+
+ for (i = 0; i < mechs->count; i++) {
+ ret = gss_inquire_names_for_mech(minor_status,
+ &mechs->elements[i],
+ &n);
+ if (ret)
+ continue;
+
+ for (j = 0; j < n->count; j++)
+ gss_add_oid_set_member(minor_status,
+ &n->elements[j],
+ &names);
+ gss_release_oid_set(&junk, &n);
+ }
+
+ ret = GSS_S_COMPLETE;
+ *name_types = names;
+out:
+
+ gss_release_oid_set(&junk, &mechs);
+
+ return ret;
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_wrap_iov(OM_uint32 * minor_status,
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int * conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ gssspnego_ctx ctx = (gssspnego_ctx)context_handle;
+
+ *minor_status = 0;
+
+ if (ctx == NULL || ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ return gss_wrap_iov(minor_status, ctx->negotiated_ctx_id,
+ conf_req_flag, qop_req, conf_state,
+ iov, iov_count);
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_unwrap_iov(OM_uint32 *minor_status,
+ gss_ctx_id_t context_handle,
+ int *conf_state,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ gssspnego_ctx ctx = (gssspnego_ctx)context_handle;
+
+ *minor_status = 0;
+
+ if (ctx == NULL || ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ return gss_unwrap_iov(minor_status,
+ ctx->negotiated_ctx_id,
+ conf_state, qop_state,
+ iov, iov_count);
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_wrap_iov_length(OM_uint32 * minor_status,
+ gss_ctx_id_t context_handle,
+ int conf_req_flag,
+ gss_qop_t qop_req,
+ int *conf_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ gssspnego_ctx ctx = (gssspnego_ctx)context_handle;
+
+ *minor_status = 0;
+
+ if (ctx == NULL || ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ return gss_wrap_iov_length(minor_status, ctx->negotiated_ctx_id,
+ conf_req_flag, qop_req, conf_state,
+ iov, iov_count);
+}
+
+#if 0
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_complete_auth_token
+ (OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ gss_buffer_t input_message_buffer)
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_complete_auth_token(minor_status,
+ ctx->negotiated_ctx_id,
+ input_message_buffer);
+}
+#endif
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_inquire_sec_context_by_oid
+ (OM_uint32 * minor_status,
+ gss_const_ctx_id_t context_handle,
+ const gss_OID desired_object,
+ gss_buffer_set_t *data_set)
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_inquire_sec_context_by_oid(minor_status,
+ ctx->negotiated_ctx_id,
+ desired_object,
+ data_set);
+}
+
+OM_uint32 GSSAPI_CALLCONV _gss_spnego_set_sec_context_option
+ (OM_uint32 * minor_status,
+ gss_ctx_id_t * context_handle,
+ const gss_OID desired_object,
+ const gss_buffer_t value)
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ /*
+ * Return GSS_S_UNAVAILABLE with a NULL context handle as at
+ * present no context options can be set globally on SPNEGO
+ * itself. Global mechanism context options are set directly
+ * on the mechanism; per-context context options are set below
+ * if ctx->negotiated_ctx_id != GSS_C_NO_CONTEXT.
+ */
+ if (context_handle == NULL || *context_handle == GSS_C_NO_CONTEXT)
+ return GSS_S_UNAVAILABLE;
+
+ ctx = (gssspnego_ctx)*context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ return GSS_S_NO_CONTEXT;
+ }
+
+ return gss_set_sec_context_option(minor_status,
+ &ctx->negotiated_ctx_id,
+ desired_object,
+ value);
+}
+
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_pseudo_random(OM_uint32 *minor_status,
+ gss_ctx_id_t context_handle,
+ int prf_key,
+ const gss_buffer_t prf_in,
+ ssize_t desired_output_len,
+ gss_buffer_t prf_out)
+{
+ gssspnego_ctx ctx;
+
+ *minor_status = 0;
+
+ if (context_handle == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ ctx = (gssspnego_ctx)context_handle;
+
+ if (ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT)
+ return GSS_S_NO_CONTEXT;
+
+ return gss_pseudo_random(minor_status,
+ ctx->negotiated_ctx_id,
+ prf_key,
+ prf_in,
+ desired_output_len,
+ prf_out);
+}
diff --git a/third_party/heimdal/lib/gssapi/spnego/external.c b/third_party/heimdal/lib/gssapi/spnego/external.c
new file mode 100644
index 00000000000..2a5121efa83
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/external.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2004, PADL Software Pty Ltd.
+ * Copyright (c) 2018 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of PADL Software nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+#include <gssapi_mech.h>
+
+/*
+ * RFC2478, SPNEGO:
+ * The security mechanism of the initial
+ * negotiation token is identified by the Object Identifier
+ * iso.org.dod.internet.security.mechanism.snego (1.3.6.1.5.5.2).
+ */
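+/*
+ * In DER, 1.3.6.1.5.5.2 encodes to the six bytes 2b 06 01 05 05 02 (the
+ * first two arcs collapse into 40 * 1 + 3 = 0x2b); this is the literal
+ * OID used in the mechanism descriptor below.
+ */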
+static gss_mo_desc spnego_mo[] = {
+ {
+ GSS_C_MA_SASL_MECH_NAME,
+ GSS_MO_MA,
+ "SASL mech name",
+ rk_UNCONST("SPNEGO"),
+ _gss_mo_get_ctx_as_string,
+ NULL
+ },
+ {
+ GSS_C_MA_MECH_NAME,
+ GSS_MO_MA,
+ "Mechanism name",
+ rk_UNCONST("SPNEGO"),
+ _gss_mo_get_ctx_as_string,
+ NULL
+ },
+ {
+ GSS_C_MA_MECH_DESCRIPTION,
+ GSS_MO_MA,
+ "Mechanism description",
+ rk_UNCONST("Heimdal SPNEGO Mechanism"),
+ _gss_mo_get_ctx_as_string,
+ NULL
+ },
+ {
+ GSS_C_MA_MECH_NEGO,
+ GSS_MO_MA,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ GSS_C_MA_MECH_PSEUDO,
+ GSS_MO_MA,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ }
+};
+
+static gssapi_mech_interface_desc spnego_mech = {
+ GMI_VERSION,
+ "spnego",
+ {6, rk_UNCONST("\x2b\x06\x01\x05\x05\x02") },
+ GM_USE_MG_CRED | GM_USE_MG_NAME,
+ NULL, /* gm_acquire_cred */
+ NULL, /* gm_release_cred */
+ _gss_spnego_init_sec_context,
+ _gss_spnego_accept_sec_context,
+ _gss_spnego_process_context_token,
+ _gss_spnego_delete_sec_context,
+ _gss_spnego_context_time,
+ _gss_spnego_get_mic,
+ _gss_spnego_verify_mic,
+ _gss_spnego_wrap,
+ _gss_spnego_unwrap,
+ NULL, /* gm_display_status */
+ NULL, /* gm_indicate_mechs */
+ NULL, /* gm_compare_name */
+ NULL, /* gm_display_name */
+ NULL, /* gm_import_name */
+ NULL, /* gm_export_name */
+ NULL, /* gm_release_name */
+ NULL, /* gm_inquire_cred */
+ _gss_spnego_inquire_context,
+ _gss_spnego_wrap_size_limit,
+ NULL, /* gm_add_cred */
+ NULL, /* gm_inquire_cred_by_mech */
+ _gss_spnego_export_sec_context,
+ _gss_spnego_import_sec_context,
+ NULL, /* gm_inquire_names_for_mech */
+ NULL, /* gm_inquire_mechs_for_name */
+ NULL, /* gm_canonicalize_name */
+ NULL, /* gm_duplicate_name */
+ _gss_spnego_inquire_sec_context_by_oid,
+ NULL, /* gm_inquire_cred_by_oid */
+ _gss_spnego_set_sec_context_option,
+ NULL, /* gm_set_cred_option */
+ _gss_spnego_pseudo_random,
+ _gss_spnego_wrap_iov,
+ _gss_spnego_unwrap_iov,
+ _gss_spnego_wrap_iov_length,
+ NULL,
+ NULL, /* gm_export_cred */
+ NULL, /* gm_import_cred */
+ NULL, /* gm_acquire_cred_from */
+ NULL, /* gm_acquire_cred_impersonate_name */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ spnego_mo,
+ sizeof(spnego_mo) / sizeof(spnego_mo[0]),
+ NULL, /* gm_localname */
+ NULL, /* gm_authorize_localname */
+ NULL, /* gm_display_name_ext */
+ NULL, /* gm_inquire_name */
+ NULL, /* gm_get_name_attribute */
+ NULL, /* gm_set_name_attribute */
+ NULL, /* gm_delete_name_attribute */
+ NULL, /* gm_export_name_composite */
+ NULL, /* gm_duplicate_cred */
+ NULL, /* gm_add_cred_from */
+ NULL, /* gm_store_cred_into */
+ NULL, /* gm_query_mechanism_info */
+ NULL, /* gm_query_meta_data */
+ NULL, /* gm_exchange_meta_data */
+ NULL, /* gm_store_cred_into2 */
+ NULL /* gm_compat */
+};
+
+gssapi_mech_interface
+__gss_spnego_initialize(void)
+{
+ return &spnego_mech;
+}
+
diff --git a/third_party/heimdal/lib/gssapi/spnego/init_sec_context.c b/third_party/heimdal/lib/gssapi/spnego/init_sec_context.c
new file mode 100644
index 00000000000..12ec0ea4106
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/init_sec_context.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (c) 1997 - 2004 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * Portions Copyright (c) 2004 PADL Software Pty Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+#define GSISC(name) \
+static \
+OM_uint32 name(OM_uint32 *, gss_const_cred_id_t, gssspnego_ctx, \
+ gss_const_name_t, gss_const_OID, \
+ OM_uint32, OM_uint32, const gss_channel_bindings_t, \
+ gss_const_buffer_t, gss_buffer_t, \
+ OM_uint32 *, OM_uint32 *)
+
+GSISC(spnego_initial);
+GSISC(spnego_reply);
+GSISC(wait_server_mic);
+GSISC(step_completed);
+
+
+/*
+ * Is target_name a sane target for `mech`?
+ */
+
+static OM_uint32
+initiator_approved(OM_uint32 *minor_status,
+ void *userptr,
+ gss_const_name_t target_name,
+ gss_const_cred_id_t cred,
+ gss_OID mech)
+{
+ OM_uint32 min_stat, maj_stat;
+ gss_ctx_id_t ctx = GSS_C_NO_CONTEXT;
+ gss_buffer_desc out;
+ struct gssspnego_optimistic_ctx *sel = userptr;
+ gss_OID negotiated_mech_type = GSS_C_NO_OID;
+ OM_uint32 flags = 0, time_rec = 0;
+ auth_scheme scheme;
+ int negoex = 0;
+
+ maj_stat = gss_init_sec_context(&min_stat,
+ cred,
+ &ctx,
+ sel->target_name,
+ mech,
+ sel->req_flags,
+ sel->time_req,
+ sel->input_chan_bindings,
+ GSS_C_NO_BUFFER,
+ &negotiated_mech_type,
+ &out,
+ &flags,
+ &time_rec);
+ if (GSS_ERROR(maj_stat)) {
+ gss_mg_collect_error(mech, maj_stat, min_stat);
+ *minor_status = min_stat;
+ return maj_stat;
+ }
+
+ if (gssspi_query_mechanism_info(&min_stat, mech, scheme) == GSS_S_COMPLETE)
+ negoex = 1;
+
+ if (sel->preferred_mech_type == GSS_C_NO_OID) {
+ sel->preferred_mech_type = mech;
+ sel->negotiated_mech_type = negotiated_mech_type;
+ sel->optimistic_token = out;
+ sel->optimistic_flags = flags;
+ sel->optimistic_time_rec = time_rec;
+ sel->gssctx = ctx;
+ if (maj_stat == GSS_S_COMPLETE)
+ sel->complete = 1;
+ if (negoex)
+ memcpy(sel->scheme, scheme, GUID_LENGTH);
+ } else {
+ gss_release_buffer(&min_stat, &out);
+ gss_delete_sec_context(&min_stat, &ctx, NULL);
+ }
+
+ maj_stat = GSS_S_COMPLETE;
+
+ if (negoex) {
+ maj_stat = _gss_negoex_add_auth_mech(minor_status, sel->spnegoctx,
+ mech, scheme);
+ }
+
+ return maj_stat;
+}
+
+/*
+ * Send a reply. Note that we only need to send a reply if we
+ * need to send a MIC or a mechanism token. Otherwise, we can
+ * return an empty buffer.
+ *
+ * The return value of this function is passed back to the API, so it
+ * must be GSS_S_CONTINUE_NEEDED if a token was generated.
+ */
+static OM_uint32
+make_reply(OM_uint32 *minor_status,
+ gssspnego_ctx ctx,
+ gss_buffer_t mech_token,
+ gss_buffer_t output_token)
+{
+ NegotiationToken nt;
+ gss_buffer_desc mic_buf;
+ OM_uint32 ret, minor;
+ size_t size;
+ NegStateEnum state;
+
+ memset(&nt, 0, sizeof(nt));
+
+ nt.element = choice_NegotiationToken_negTokenResp;
+
+ nt.u.negTokenResp.negState = NULL;
+ nt.u.negTokenResp.supportedMech = NULL;
+
+ output_token->length = 0;
+ output_token->value = NULL;
+
+ /* figure out our status */
+
+ if (ctx->flags.open) {
+ if (ctx->flags.verified_mic == 1 || ctx->flags.require_mic == 0)
+ state = accept_completed;
+ else
+ state = accept_incomplete;
+ } else {
+ state = accept_incomplete;
+ }
+
+ if (mech_token->length == 0) {
+ nt.u.negTokenResp.responseToken = NULL;
+ } else {
+ ALLOC(nt.u.negTokenResp.responseToken, 1);
+ if (nt.u.negTokenResp.responseToken == NULL) {
+ free_NegotiationToken(&nt);
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ nt.u.negTokenResp.responseToken->length = mech_token->length;
+ nt.u.negTokenResp.responseToken->data = mech_token->value;
+ mech_token->length = 0;
+ mech_token->value = NULL;
+ }
+
+ /*
+ * XXX should limit when we send the MIC ?
+ */
+ if (ctx->flags.open && ctx->flags.sent_mic == 0) {
+
+ ctx->flags.sent_mic = 1;
+
+ ret = gss_get_mic(minor_status,
+ ctx->negotiated_ctx_id,
+ 0,
+ &ctx->NegTokenInit_mech_types,
+ &mic_buf);
+ if (ret == GSS_S_COMPLETE) {
+ _gss_spnego_ntlm_reset_crypto(&minor, ctx, FALSE);
+
+ ALLOC(nt.u.negTokenResp.mechListMIC, 1);
+ if (nt.u.negTokenResp.mechListMIC == NULL) {
+ gss_release_buffer(minor_status, &mic_buf);
+ free_NegotiationToken(&nt);
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ nt.u.negTokenResp.mechListMIC->length = mic_buf.length;
+ nt.u.negTokenResp.mechListMIC->data = mic_buf.value;
+ /* mic_buf free()d with nt */
+ } else if (ret == GSS_S_UNAVAILABLE) {
+ /* let's hope that it's ok not to send the mechListMIC for broken mechs */
+ nt.u.negTokenResp.mechListMIC = NULL;
+ ctx->flags.require_mic = 0;
+ } else {
+ free_NegotiationToken(&nt);
+ *minor_status = ENOMEM;
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ ret, *minor_status,
+ "SPNEGO failed to sign MIC");
+ }
+ } else {
+ nt.u.negTokenResp.mechListMIC = NULL;
+ }
+
+ ALLOC(nt.u.negTokenResp.negState, 1);
+ if (nt.u.negTokenResp.negState == NULL) {
+ free_NegotiationToken(&nt);
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ *nt.u.negTokenResp.negState = state;
+
+ ASN1_MALLOC_ENCODE(NegotiationToken,
+ output_token->value, output_token->length,
+ &nt, &size, ret);
+ free_NegotiationToken(&nt);
+ if (ret) {
+ *minor_status = ret;
+ return GSS_S_FAILURE;
+ }
+
+ if (state != accept_completed)
+ return GSS_S_CONTINUE_NEEDED;
+
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+spnego_initial(OM_uint32 * minor_status,
+ gss_const_cred_id_t cred,
+ gssspnego_ctx ctx,
+ gss_const_name_t target_name,
+ gss_const_OID mech_type,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_const_buffer_t input_token,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec)
+{
+ NegotiationToken nt;
+ int ret;
+ OM_uint32 sub, minor;
+ gss_buffer_desc mech_token;
+ size_t size = 0;
+ gss_buffer_desc data;
+ struct gssspnego_optimistic_ctx sel;
+
+ *minor_status = 0;
+
+ memset(&nt, 0, sizeof(nt));
+
+ if (target_name == GSS_C_NO_NAME)
+ return GSS_S_BAD_NAME;
+
+ sub = gss_duplicate_name(&minor, target_name, &ctx->target_name);
+ if (GSS_ERROR(sub)) {
+ *minor_status = minor;
+ return sub;
+ }
+
+ nt.element = choice_NegotiationToken_negTokenInit;
+
+ ctx->flags.local = 1;
+
+ memset(&sel, 0, sizeof(sel));
+
+ sel.spnegoctx = ctx;
+ sel.target_name = ctx->target_name;
+ sel.preferred_mech_type = GSS_C_NO_OID;
+ sel.req_flags = req_flags;
+ sel.time_req = time_req;
+ sel.input_chan_bindings = (gss_channel_bindings_t)input_chan_bindings;
+
+ sub = _gss_spnego_indicate_mechtypelist(&minor,
+ ctx->target_name,
+ req_flags,
+ initiator_approved,
+ &sel,
+ 0,
+ cred,
+ &nt.u.negTokenInit.mechTypes,
+ &ctx->preferred_mech_type);
+ if (GSS_ERROR(sub)) {
+ *minor_status = minor;
+ return sub;
+ }
+
+ _gss_spnego_log_mechTypes(&nt.u.negTokenInit.mechTypes);
+
+ nt.u.negTokenInit.reqFlags = NULL;
+
+ if (gss_oid_equal(ctx->preferred_mech_type, GSS_NEGOEX_MECHANISM)) {
+ struct negoex_auth_mech *mech;
+
+ sub = _gss_negoex_init(&minor,
+ &sel,
+ ctx,
+ (gss_cred_id_t)cred,
+ req_flags,
+ time_req,
+ input_chan_bindings,
+ GSS_C_NO_BUFFER,
+ &mech_token);
+ if (GSS_ERROR(sub)) {
+ free_NegotiationToken(&nt);
+ return gss_mg_set_error_string(GSS_C_NO_OID, sub, minor,
+ "NegoEx could not generate a context token");
+ }
+ mech = _gss_negoex_negotiated_mech(ctx);
+ ctx->flags.maybe_open = mech && mech->complete;
+ gss_release_buffer(&minor, &sel.optimistic_token);
+ } else {
+ /* optimistic token from selection context */
+ mech_token = sel.optimistic_token;
+ ctx->mech_flags = sel.optimistic_flags;
+ ctx->mech_time_rec = sel.optimistic_time_rec;
+ ctx->negotiated_mech_type = sel.negotiated_mech_type;
+ ctx->negotiated_ctx_id = sel.gssctx;
+ ctx->flags.maybe_open = sel.complete;
+ }
+
+ if (ctx->preferred_mech_type == GSS_C_NO_OID) {
+ free_NegotiationToken(&nt);
+ *minor_status = 0;
+ return gss_mg_set_error_string(GSS_C_NO_OID, GSS_S_NO_CONTEXT, 0,
+ "SPNEGO could not find a preferred mechanism");
+ }
+
+
+ if (mech_token.length != 0) {
+ ALLOC(nt.u.negTokenInit.mechToken, 1);
+ if (nt.u.negTokenInit.mechToken == NULL) {
+ free_NegotiationToken(&nt);
+ gss_release_buffer(&minor, &mech_token);
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ nt.u.negTokenInit.mechToken->length = mech_token.length;
+ nt.u.negTokenInit.mechToken->data = malloc(mech_token.length);
+ if (nt.u.negTokenInit.mechToken->data == NULL && mech_token.length != 0) {
+ free_NegotiationToken(&nt);
+ gss_release_buffer(&minor, &mech_token);
+ *minor_status = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ memcpy(nt.u.negTokenInit.mechToken->data, mech_token.value, mech_token.length);
+ gss_release_buffer(&minor, &mech_token);
+ } else
+ nt.u.negTokenInit.mechToken = NULL;
+
+ nt.u.negTokenInit.mechListMIC = NULL;
+
+ {
+ MechTypeList mt;
+
+ mt.len = nt.u.negTokenInit.mechTypes.len;
+ mt.val = nt.u.negTokenInit.mechTypes.val;
+
+ ASN1_MALLOC_ENCODE(MechTypeList,
+ ctx->NegTokenInit_mech_types.value,
+ ctx->NegTokenInit_mech_types.length,
+ &mt, &size, ret);
+ if (ret) {
+ *minor_status = ret;
+ free_NegotiationToken(&nt);
+ return GSS_S_FAILURE;
+ }
+ }
+
+ ASN1_MALLOC_ENCODE(NegotiationToken, data.value, data.length, &nt, &size, ret);
+ free_NegotiationToken(&nt);
+ if (ret) {
+ return GSS_S_FAILURE;
+ }
+ if (data.length != size)
+ abort();
+
+ sub = gss_encapsulate_token(&data,
+ GSS_SPNEGO_MECHANISM,
+ output_token);
+ free (data.value);
+
+ if (sub) {
+ return sub;
+ }
+
+ if (ret_flags)
+ *ret_flags = ctx->mech_flags;
+ if (time_rec)
+ *time_rec = ctx->mech_time_rec;
+
+ ctx->initiator_state = spnego_reply;
+
+ return GSS_S_CONTINUE_NEEDED;
+}
+
+/*
+ *
+ */
+
+static OM_uint32
+spnego_reply(OM_uint32 * minor_status,
+ gss_const_cred_id_t cred,
+ gssspnego_ctx ctx,
+ gss_const_name_t target_name,
+ gss_const_OID mech_type,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_const_buffer_t input_token,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec)
+{
+ OM_uint32 ret, minor;
+ NegotiationToken resp;
+ gss_buffer_desc mech_output_token;
+ NegStateEnum negState;
+
+ *minor_status = 0;
+
+ output_token->length = 0;
+ output_token->value = NULL;
+
+ mech_output_token.length = 0;
+ mech_output_token.value = NULL;
+
+ ret = decode_NegotiationToken(input_token->value, input_token->length,
+ &resp, NULL);
+ if (ret)
+ return ret;
+
+ /* The SPNEGO token must be a negTokenResp */
+ if (resp.element != choice_NegotiationToken_negTokenResp) {
+ free_NegotiationToken(&resp);
+ *minor_status = 0;
+ return GSS_S_BAD_MECH;
+ }
+
+ /*
+ * When negState is absent, the actual state should be inferred from
+ * the state of the negotiated mechanism context. (RFC 4178 4.2.2.)
+ */
+ if (resp.u.negTokenResp.negState != NULL)
+ negState = *resp.u.negTokenResp.negState;
+ else
+ negState = accept_incomplete;
+
+ /*
+ * Pick up the mechanism that the acceptor selected; only pick up
+ * the first selection.
+ */
+
+ if (ctx->selected_mech_type == GSS_C_NO_OID && resp.u.negTokenResp.supportedMech) {
+ gss_OID_desc oid;
+ size_t len;
+
+ ctx->flags.seen_supported_mech = 1;
+
+ oid.length = (OM_uint32)der_length_oid(resp.u.negTokenResp.supportedMech);
+ oid.elements = malloc(oid.length);
+ if (oid.elements == NULL) {
+ free_NegotiationToken(&resp);
+ return GSS_S_BAD_MECH;
+ }
+ ret = der_put_oid(((uint8_t *)oid.elements) + oid.length - 1,
+ oid.length,
+ resp.u.negTokenResp.supportedMech,
+ &len);
+ if (ret || len != oid.length) {
+ free(oid.elements);
+ free_NegotiationToken(&resp);
+ return GSS_S_BAD_MECH;
+ }
+
+ if (gss_oid_equal(GSS_SPNEGO_MECHANISM, &oid)) {
+ free(oid.elements);
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EINVAL),
+ "SPNEGO acceptor picked SPNEGO??");
+ }
+
+ /* check if the acceptor took our optimistic token */
+ if (gss_oid_equal(ctx->preferred_mech_type, &oid)) {
+ ctx->selected_mech_type = ctx->preferred_mech_type;
+ } else if (gss_oid_equal(ctx->preferred_mech_type, GSS_KRB5_MECHANISM) &&
+ gss_oid_equal(&oid, &_gss_spnego_mskrb_mechanism_oid_desc)) {
+ /* mis-encoded asn1 type from msft servers */
+ ctx->selected_mech_type = ctx->preferred_mech_type;
+ } else {
+ /* nope, let's start over */
+ gss_delete_sec_context(&minor, &ctx->negotiated_ctx_id,
+ GSS_C_NO_BUFFER);
+ ctx->negotiated_ctx_id = GSS_C_NO_CONTEXT;
+
+ if (gss_oid_equal(&oid, GSS_NEGOEX_MECHANISM))
+ ctx->selected_mech_type = GSS_NEGOEX_MECHANISM;
+ else
+ ctx->selected_mech_type = _gss_mg_support_mechanism(&oid);
+
+ /* XXX check that the server picked a mechanism we proposed */
+ if (ctx->selected_mech_type == GSS_C_NO_OID) {
+ free(oid.elements);
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EINVAL),
+ "SPNEGO acceptor sent unsupported supportedMech");
+ }
+ }
+
+ _gss_spnego_log_mech("initiator selected mechanism", ctx->selected_mech_type);
+
+ free(oid.elements);
+
+ } else if (ctx->selected_mech_type == NULL) {
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EINVAL),
+ "SPNEGO acceptor didn't send supportedMech");
+ }
+
+ /* if we got a token (of non-zero length), pass it to the underlying mech */
+ if ((resp.u.negTokenResp.responseToken != NULL && resp.u.negTokenResp.responseToken->length) ||
+ ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT) {
+ gss_buffer_desc mech_input_token;
+
+ if (resp.u.negTokenResp.responseToken) {
+ mech_input_token.length = resp.u.negTokenResp.responseToken->length;
+ mech_input_token.value = resp.u.negTokenResp.responseToken->data;
+ } else {
+ mech_input_token.length = 0;
+ mech_input_token.value = NULL;
+ }
+
+ /* Fall through as if the negotiated mechanism
+ was requested explicitly */
+ if (gss_oid_equal(ctx->selected_mech_type, GSS_NEGOEX_MECHANISM)) {
+ ret = _gss_negoex_init(&minor,
+ NULL, /* no optimistic token */
+ ctx,
+ (gss_cred_id_t)cred,
+ req_flags,
+ time_req,
+ input_chan_bindings,
+ &mech_input_token,
+ &mech_output_token);
+ } else {
+ ret = gss_init_sec_context(&minor,
+ cred,
+ &ctx->negotiated_ctx_id,
+ ctx->target_name,
+ ctx->selected_mech_type,
+ req_flags,
+ time_req,
+ input_chan_bindings,
+ &mech_input_token,
+ &ctx->negotiated_mech_type,
+ &mech_output_token,
+ &ctx->mech_flags,
+ &ctx->mech_time_rec);
+ if (GSS_ERROR(ret)) {
+ gss_mg_collect_error(ctx->selected_mech_type, ret, minor);
+ }
+ }
+ /*
+ * If the acceptor rejected, we're out even if the inner context is
+ * now complete. Note that the rejection is not integrity-protected.
+ */
+ if (negState == reject)
+ ret = GSS_S_BAD_MECH;
+ if (GSS_ERROR(ret)) {
+ free_NegotiationToken(&resp);
+ *minor_status = minor;
+ return ret;
+ }
+ if (ret == GSS_S_COMPLETE) {
+ ctx->flags.open = 1;
+ }
+ } else if (negState == reject) {
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EPERM),
+ "SPNEGO acceptor rejected initiator token");
+ } else if (negState == accept_completed) {
+ /*
+ * Note that the accept_completed isn't integrity-protected, but
+ * ctx->maybe_open can only be true if the inner context is fully
+ * established.
+ */
+ if (ctx->flags.maybe_open)
+ ctx->flags.open = 1;
+
+ if (!ctx->flags.open) {
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EINVAL),
+ "SPNEGO acceptor sent acceptor complete, "
+ "but we are not complete yet");
+ }
+ }
+
+ if (negState == request_mic) {
+ ctx->flags.peer_require_mic = 1;
+ }
+
+ if (ctx->flags.open && ctx->flags.verified_mic == 0) {
+
+ ctx->flags.require_mic = 1; /* default is to require a MIC */
+ ctx->flags.safe_omit = _gss_spnego_safe_omit_mechlist_mic(ctx);
+
+ /*
+ * If the peer sent mechListMIC, require it to verify ...
+ */
+ if (resp.u.negTokenResp.mechListMIC) {
+ heim_octet_string *m = resp.u.negTokenResp.mechListMIC;
+
+ /* ...unless it's a Windows 2000 server that sends the
+ * responseToken inside the mechListMIC too. We only
+ * accept this condition if it would have been safe to
+ * omit the MIC anyway. */
+
+ if (ctx->flags.safe_omit
+ && resp.u.negTokenResp.responseToken
+ && der_heim_octet_string_cmp(m, resp.u.negTokenResp.responseToken) == 0)
+ {
+ ctx->flags.require_mic = 0;
+ }
+ }
+
+ } else {
+ ctx->flags.require_mic = 0;
+ }
+
+ /*
+ * If we are supposed to check the MIC and have it, force checking now.
+ */
+
+ if (ctx->flags.require_mic && resp.u.negTokenResp.mechListMIC) {
+
+ ret = _gss_spnego_verify_mechtypes_mic(minor_status, ctx,
+ resp.u.negTokenResp.mechListMIC);
+ if (ret) {
+ free_NegotiationToken(&resp);
+ return ret;
+ }
+ }
+
+ /*
+ * Now that the underlying mech is open (connected), we can figure out
+ * which step to go to next.
+ */
+
+ if (ctx->flags.open) {
+
+ if (negState == accept_completed && ctx->flags.safe_omit) {
+ ctx->initiator_state = step_completed;
+ ret = GSS_S_COMPLETE;
+ } else if (ctx->flags.require_mic != 0 && ctx->flags.verified_mic == 0) {
+ ctx->initiator_state = wait_server_mic;
+ ret = GSS_S_CONTINUE_NEEDED;
+ } else {
+ ctx->initiator_state = step_completed;
+ ret = GSS_S_COMPLETE;
+ }
+ }
+
+ if (negState != accept_completed ||
+ ctx->initiator_state != step_completed ||
+ mech_output_token.length)
+ {
+ OM_uint32 ret2;
+ ret2 = make_reply(minor_status, ctx,
+ &mech_output_token,
+ output_token);
+ if (ret2)
+ ret = ret2;
+ }
+
+ free_NegotiationToken(&resp);
+
+ gss_release_buffer(&minor, &mech_output_token);
+
+ if (ret_flags)
+ *ret_flags = ctx->mech_flags;
+ if (time_rec)
+ *time_rec = ctx->mech_time_rec;
+
+ return ret;
+}
+
+static OM_uint32
+wait_server_mic(OM_uint32 * minor_status,
+ gss_const_cred_id_t cred,
+ gssspnego_ctx ctx,
+ gss_const_name_t target_name,
+ gss_const_OID mech_type,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_const_buffer_t input_token,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec)
+{
+ OM_uint32 major_status;
+ NegotiationToken resp;
+ int ret;
+
+ ret = decode_NegotiationToken(input_token->value, input_token->length, &resp, NULL);
+ if (ret)
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, ret,
+ "Failed to decode NegotiationToken");
+
+ if (resp.element != choice_NegotiationToken_negTokenResp
+ || resp.u.negTokenResp.negState == NULL
+ || *resp.u.negTokenResp.negState != accept_completed)
+ {
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EINVAL),
+ "NegToken not accept_completed");
+ }
+
+ if (resp.u.negTokenResp.mechListMIC) {
+ major_status = _gss_spnego_verify_mechtypes_mic(minor_status, ctx,
+ resp.u.negTokenResp.mechListMIC);
+ } else if (ctx->flags.safe_omit == 0) {
+ free_NegotiationToken(&resp);
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_MECH, (*minor_status = EINVAL),
+ "Waiting for MIC, but its missing in server request");
+ } else {
+ major_status = GSS_S_COMPLETE;
+ }
+
+ free_NegotiationToken(&resp);
+ if (major_status != GSS_S_COMPLETE)
+ return major_status;
+
+ ctx->flags.verified_mic = 1;
+ ctx->initiator_state = step_completed;
+
+ if (ret_flags)
+ *ret_flags = ctx->mech_flags;
+ if (time_rec)
+ *time_rec = ctx->mech_time_rec;
+
+ *minor_status = 0;
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+step_completed(OM_uint32 * minor_status,
+ gss_const_cred_id_t cred,
+ gssspnego_ctx ctx,
+ gss_const_name_t name,
+ gss_const_OID mech_type,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_const_buffer_t input_token,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec)
+{
+ return gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ GSS_S_BAD_STATUS, (*minor_status = EINVAL),
+ "SPNEGO called got ISC call one too many");
+}
+
+OM_uint32 GSSAPI_CALLCONV
+_gss_spnego_init_sec_context(OM_uint32 * minor_status,
+ gss_const_cred_id_t initiator_cred_handle,
+ gss_ctx_id_t * context_handle,
+ gss_const_name_t target_name,
+ const gss_OID mech_type,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ const gss_buffer_t input_token,
+ gss_OID * actual_mech_type,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec)
+{
+ gssspnego_ctx ctx;
+ OM_uint32 ret;
+
+ if (*context_handle == GSS_C_NO_CONTEXT) {
+ ret = _gss_spnego_alloc_sec_context(minor_status, context_handle);
+ if (GSS_ERROR(ret))
+ return ret;
+
+ ctx = (gssspnego_ctx)*context_handle;
+
+ ctx->initiator_state = spnego_initial;
+ } else {
+ ctx = (gssspnego_ctx)*context_handle;
+ }
+
+
+ HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
+
+ do {
+ ret = ctx->initiator_state(minor_status, initiator_cred_handle, ctx, target_name,
+ mech_type, req_flags, time_req, input_chan_bindings, input_token,
+ output_token, ret_flags, time_rec);
+
+ } while (ret == GSS_S_COMPLETE &&
+ ctx->initiator_state != step_completed &&
+ output_token->length == 0);
+
+ /* destroy context in case of error */
+ if (GSS_ERROR(ret)) {
+ OM_uint32 junk;
+ _gss_spnego_internal_delete_sec_context(&junk, context_handle, GSS_C_NO_BUFFER);
+ } else {
+
+ HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
+
+ if (actual_mech_type)
+ *actual_mech_type = ctx->negotiated_mech_type;
+ }
+
+ return ret;
+}
+
diff --git a/third_party/heimdal/lib/gssapi/spnego/negoex_ctx.c b/third_party/heimdal/lib/gssapi/spnego/negoex_ctx.c
new file mode 100644
index 00000000000..13e28bb59fd
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/negoex_ctx.c
@@ -0,0 +1,1037 @@
+/*
+ * Copyright (C) 2011-2021 PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+/*
+ * The initial context token emitted by the initiator is an INITIATOR_NEGO
+ * message followed by zero or more INITIATOR_META_DATA tokens, and zero
+ * or one AP_REQUEST token.
+ *
+ * Upon receiving this, the acceptor computes the list of mutually supported
+ * authentication mechanisms and performs the metadata exchange. The output
+ * token is ACCEPTOR_NEGO followed by zero or more ACCEPTOR_META_DATA tokens,
+ * and zero or one CHALLENGE token.
+ *
+ * Once the metadata exchange is complete and a mechanism is selected, the
+ * selected mechanism's context token exchange continues with AP_REQUEST and
+ * CHALLENGE messages.
+ *
+ * Once the context token exchange is complete, VERIFY messages are sent to
+ * authenticate the entire exchange.
+ */
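+/*
+ * Schematically (brackets mark optional messages):
+ *
+ *   initiator -> acceptor: INITIATOR_NEGO INITIATOR_META_DATA* [AP_REQUEST]
+ *   acceptor  -> initiator: ACCEPTOR_NEGO ACCEPTOR_META_DATA* [CHALLENGE]
+ *   ... AP_REQUEST/CHALLENGE exchange for the selected mechanism ...
+ *   both directions:       VERIFY
+ */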
+
+static OM_uint32
+buffer_set_to_crypto(OM_uint32 *minor,
+ krb5_context context,
+ gss_buffer_set_t buffers,
+ krb5_crypto *crypto)
+{
+ krb5_error_code ret;
+ krb5_keyblock keyblock;
+ OM_uint32 tmp;
+
+ /*
+ * Returned keys must be in two buffers, with the key contents in
+ * the first and the enctype as a 32-bit little-endian integer in
+ * the second.
+ */
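+ /*
+ * For example (assuming aes256-cts-hmac-sha1-96, enctype 18), the first
+ * buffer would hold the 32 key bytes and the second the four bytes
+ * 12 00 00 00 (18 as a little-endian uint32).
+ */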
+ if (buffers->count != 2 ||
+ buffers->elements[1].length != sizeof(tmp)) {
+ *minor = (OM_uint32)NEGOEX_NO_VERIFY_KEY;
+ return GSS_S_FAILURE;
+ }
+
+ if (*crypto != NULL) {
+ krb5_crypto_destroy(context, *crypto);
+ *crypto = NULL;
+ }
+
+ keyblock.keyvalue.data = buffers->elements[0].value;
+ keyblock.keyvalue.length = buffers->elements[0].length;
+ _gss_mg_decode_le_uint32(buffers->elements[1].value, &tmp);
+ keyblock.keytype = tmp;
+
+ ret = krb5_crypto_init(context, &keyblock, 0, crypto);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_FAILURE;
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+#define NEGOEX_SIGN_KEY 1
+#define NEGOEX_VERIFY_KEY 2
+#define NEGOEX_BOTH_KEYS (NEGOEX_SIGN_KEY|NEGOEX_VERIFY_KEY)
+
+static OM_uint32
+get_session_keys(OM_uint32 *minor,
+ krb5_context context,
+ OM_uint32 flags,
+ struct negoex_auth_mech *mech)
+{
+ OM_uint32 major, tmpMinor;
+ gss_buffer_set_t buffers = GSS_C_NO_BUFFER_SET;
+
+ if (flags & NEGOEX_SIGN_KEY) {
+ major = gss_inquire_sec_context_by_oid(&tmpMinor, mech->mech_context,
+ GSS_C_INQ_NEGOEX_KEY, &buffers);
+ if (major == GSS_S_COMPLETE) {
+ major = buffer_set_to_crypto(minor, context,
+ buffers, &mech->crypto);
+ _gss_secure_release_buffer_set(&tmpMinor, &buffers);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+ }
+
+ if (flags & NEGOEX_VERIFY_KEY) {
+ major = gss_inquire_sec_context_by_oid(&tmpMinor, mech->mech_context,
+ GSS_C_INQ_NEGOEX_VERIFY_KEY,
+ &buffers);
+ if (major == GSS_S_COMPLETE) {
+ major = buffer_set_to_crypto(minor, context,
+ buffers, &mech->verify_crypto);
+ _gss_secure_release_buffer_set(&tmpMinor, &buffers);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+emit_initiator_nego(OM_uint32 *minor, gssspnego_ctx ctx)
+{
+ uint8_t random[32];
+ struct negoex_auth_mech *mech;
+ size_t i = 0;
+
+ krb5_generate_random_block(random, sizeof(random));
+
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
+ _gss_negoex_log_auth_scheme(ctx->flags.local, ++i, mech->scheme);
+
+ return _gss_negoex_add_nego_message(minor, ctx, INITIATOR_NEGO, random);
+}
+
+static OM_uint32
+process_initiator_nego(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ struct negoex_message *messages,
+ size_t nmessages)
+{
+ struct nego_message *msg;
+ size_t i;
+
+ heim_assert(!ctx->flags.local && ctx->negoex_step == 1,
+ "NegoEx INITIATOR_NEGO token received after first leg");
+
+ msg = _gss_negoex_locate_nego_message(messages, nmessages, INITIATOR_NEGO);
+ if (msg == NULL) {
+ *minor = (OM_uint32)NEGOEX_MISSING_NEGO_MESSAGE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ for (i = 0; i < msg->nschemes; i++)
+ _gss_negoex_log_auth_scheme(ctx->flags.local, i + 1, &msg->schemes[i * GUID_LENGTH]);
+
+ _gss_negoex_restrict_auth_schemes(ctx, msg->schemes, msg->nschemes);
+
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+emit_acceptor_nego(OM_uint32 *minor, gssspnego_ctx ctx)
+{
+ uint8_t random[32];
+
+ krb5_generate_random_block(random, 32);
+
+ return _gss_negoex_add_nego_message(minor, ctx, ACCEPTOR_NEGO, random);
+}
+
+static OM_uint32
+process_acceptor_nego(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ struct negoex_message *messages,
+ size_t nmessages)
+{
+ struct nego_message *msg;
+
+ msg = _gss_negoex_locate_nego_message(messages, nmessages, ACCEPTOR_NEGO);
+ if (msg == NULL) {
+ *minor = (OM_uint32)NEGOEX_MISSING_NEGO_MESSAGE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ /*
+ * Reorder and prune our mech list to match the acceptor's list (or a
+ * subset of it).
+ */
+ _gss_negoex_common_auth_schemes(ctx, msg->schemes, msg->nschemes);
+
+ return GSS_S_COMPLETE;
+}
+
+static void
+query_meta_data(gssspnego_ctx ctx,
+ struct gssspnego_optimistic_ctx *opt,
+ gss_cred_id_t cred,
+ OM_uint32 req_flags)
+{
+ OM_uint32 major, minor;
+ struct negoex_auth_mech *p, *next;
+
+ /*
+ * Note that if we received an optimistic context token from SPNEGO,
+ * then we will call QMD after ISC, rather than before. Mechanisms
+ * must be prepared to handle this and must not assume the context
+ * will be NULL on entry.
+ */
+ HEIM_TAILQ_FOREACH_SAFE(p, &ctx->negoex_mechs, links, next) {
+ if (opt != NULL && memcmp(opt->scheme, p->scheme, GUID_LENGTH) == 0)
+ p->mech_context = opt->gssctx;
+
+ major = gssspi_query_meta_data(&minor, p->oid, cred, &p->mech_context,
+ ctx->target_name, req_flags, &p->metadata);
+ /* GSS_Query_meta_data failure removes mechanism from list. */
+ if (major != GSS_S_COMPLETE)
+ _gss_negoex_delete_auth_mech(ctx, p);
+ }
+}
+
+static void
+exchange_meta_data(gssspnego_ctx ctx,
+ gss_cred_id_t cred,
+ OM_uint32 req_flags,
+ struct negoex_message *messages,
+ size_t nmessages)
+{
+ OM_uint32 major, minor;
+ struct negoex_auth_mech *mech;
+ enum message_type type;
+ struct exchange_message *msg;
+ uint32_t i;
+
+ type = ctx->flags.local ? ACCEPTOR_META_DATA : INITIATOR_META_DATA;
+
+ for (i = 0; i < nmessages; i++) {
+ if (messages[i].type != type)
+ continue;
+ msg = &messages[i].u.e;
+
+ mech = _gss_negoex_locate_auth_scheme(ctx, msg->scheme);
+ if (mech == NULL)
+ continue;
+
+ major = gssspi_exchange_meta_data(&minor, mech->oid, cred,
+ &mech->mech_context,
+ ctx->target_name,
+ req_flags, &msg->token);
+ /* GSS_Exchange_meta_data failure removes mechanism from list. */
+ if (major != GSS_S_COMPLETE)
+ _gss_negoex_delete_auth_mech(ctx, mech);
+ }
+}
+
+static void
+release_mech_crypto(struct negoex_auth_mech *mech)
+{
+ krb5_context context = NULL;
+
+ if (mech->crypto || mech->verify_crypto)
+ context = _gss_mg_krb5_context();
+
+ if (mech->crypto) {
+ krb5_crypto_destroy(context, mech->crypto);
+ mech->crypto = NULL;
+ }
+
+ if (mech->verify_crypto) {
+ krb5_crypto_destroy(context, mech->verify_crypto);
+ mech->verify_crypto = NULL;
+ }
+
+ mech->sent_checksum = FALSE;
+}
+
+/*
+ * In the initiator, if we are processing the acceptor's first reply, discard
+ * the optimistic context if the acceptor ignored the optimistic token. If the
+ * acceptor continued the optimistic mech, discard all other mechs.
+ */
+static void
+check_optimistic_result(gssspnego_ctx ctx,
+ struct negoex_message *messages,
+ size_t nmessages)
+{
+ struct negoex_auth_mech *mech;
+ OM_uint32 tmpMinor;
+
+ heim_assert(ctx->flags.local && ctx->negoex_step == 2,
+ "NegoEx optimistic result should only be checked in second leg");
+
+ /* Do nothing if we didn't make an optimistic context. */
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ if (mech == NULL || mech->mech_context == GSS_C_NO_CONTEXT)
+ return;
+
+ /*
+ * If the acceptor used the optimistic token, it will send an acceptor
+ * token or a checksum (or both) in its first reply.
+ */
+ if (_gss_negoex_locate_exchange_message(messages, nmessages,
+ CHALLENGE) != NULL ||
+ _gss_negoex_locate_verify_message(messages, nmessages) != NULL) {
+ /*
+ * The acceptor continued the optimistic mech, and metadata exchange
+ * didn't remove it. Commit to this mechanism.
+ */
+ _gss_negoex_select_auth_mech(ctx, mech);
+ } else {
+ /*
+ * The acceptor ignored the optimistic token. Restart the mech.
+ */
+ gss_delete_sec_context(&tmpMinor, &mech->mech_context, GSS_C_NO_BUFFER);
+ release_mech_crypto(mech);
+ mech->complete = FALSE;
+ }
+}
+
+/* Perform an initiator step of the underlying mechanism exchange. */
+static OM_uint32
+mech_init(OM_uint32 *minor,
+ struct gssspnego_optimistic_ctx *opt,
+ gssspnego_ctx ctx,
+ gss_cred_id_t cred,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ struct negoex_message *messages,
+ size_t nmessages,
+ gss_buffer_t output_token,
+ int *mech_error)
+{
+ OM_uint32 major, first_major = GSS_S_COMPLETE, first_minor = 0;
+ struct negoex_auth_mech *mech = NULL;
+ gss_buffer_t input_token = GSS_C_NO_BUFFER;
+ struct exchange_message *msg;
+ int first_mech;
+ krb5_context context = _gss_mg_krb5_context();
+
+ output_token->value = NULL;
+ output_token->length = 0;
+
+ *mech_error = FALSE;
+
+ /* Allow disabling of optimistic token for testing. */
+ if (ctx->negoex_step == 1 &&
+ secure_getenv("NEGOEX_NO_OPTIMISTIC_TOKEN") != NULL)
+ return GSS_S_COMPLETE;
+
+ if (HEIM_TAILQ_EMPTY(&ctx->negoex_mechs)) {
+ *minor = (OM_uint32)NEGOEX_NO_AVAILABLE_MECHS;
+ return GSS_S_FAILURE;
+ }
+
+ /*
+ * Get the input token. The challenge could be for the optimistic mech,
+ * which we might have discarded in metadata exchange, so ignore the
+ * challenge if it doesn't match the first auth mech.
+ */
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ msg = _gss_negoex_locate_exchange_message(messages, nmessages, CHALLENGE);
+ if (msg != NULL && GUID_EQ(msg->scheme, mech->scheme))
+ input_token = &msg->token;
+
+ if (mech->complete)
+ return GSS_S_COMPLETE;
+
+ first_mech = TRUE;
+ major = GSS_S_BAD_MECH;
+
+ while (!HEIM_TAILQ_EMPTY(&ctx->negoex_mechs)) {
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+
+ /*
+ * If SPNEGO generated an optimistic token when probing available
+ * mechanisms, we can reuse it here. This avoids a potentially
+ * expensive and redundant call to GSS_Init_sec_context().
+ */
+ if (opt != NULL && memcmp(opt->scheme, mech->scheme, GUID_LENGTH) == 0) {
+ heim_assert(ctx->negoex_step == 1,
+ "SPNEGO optimistic token only valid for NegoEx first leg");
+
+ major = _gss_copy_buffer(minor, &opt->optimistic_token, output_token);
+ if (GSS_ERROR(major))
+ return major;
+
+ ctx->negotiated_mech_type = opt->negotiated_mech_type;
+ ctx->mech_flags = opt->optimistic_flags;
+ ctx->mech_time_rec = opt->optimistic_time_rec;
+
+ mech->mech_context = opt->gssctx;
+ opt->gssctx = NULL; /* steal it */
+
+ mech->complete = opt->complete;
+ major = GSS_S_COMPLETE;
+ } else {
+ major = gss_init_sec_context(minor, cred, &mech->mech_context,
+ ctx->target_name, mech->oid,
+ req_flags, time_req,
+ input_chan_bindings, input_token,
+ &ctx->negotiated_mech_type, output_token,
+ &ctx->mech_flags, &ctx->mech_time_rec);
+ if (major == GSS_S_COMPLETE)
+ mech->complete = 1;
+ else if (GSS_ERROR(major)) {
+ gss_mg_collect_error(mech->oid, major, *minor);
+ *mech_error = TRUE;
+ }
+ }
+ if (!GSS_ERROR(major))
+ return get_session_keys(minor, context, NEGOEX_BOTH_KEYS, mech);
+
+ /* Remember the error we got from the first mech. */
+ if (first_mech) {
+ first_major = major;
+ first_minor = *minor;
+ }
+
+ /* If we still have multiple mechs to try, move on to the next one. */
+ _gss_negoex_delete_auth_mech(ctx, mech);
+ first_mech = FALSE;
+ input_token = GSS_C_NO_BUFFER;
+ }
+
+ if (HEIM_TAILQ_EMPTY(&ctx->negoex_mechs)) {
+ major = first_major;
+ *minor = first_minor;
+ }
+
+ return major;
+}
+
+/* Perform an acceptor step of the underlying mechanism exchange. */
+static OM_uint32
+mech_accept(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_cred_id_t cred,
+ const gss_channel_bindings_t input_chan_bindings,
+ struct negoex_message *messages,
+ size_t nmessages,
+ gss_buffer_t output_token,
+ gss_cred_id_t *deleg_cred,
+ int *mech_error)
+{
+ OM_uint32 major, tmpMinor;
+ struct negoex_auth_mech *mech;
+ struct exchange_message *msg;
+ krb5_context context = _gss_mg_krb5_context();
+
+ heim_assert(!ctx->flags.local && !HEIM_TAILQ_EMPTY(&ctx->negoex_mechs),
+ "Acceptor NegoEx function called in wrong sequence");
+
+ *mech_error = FALSE;
+
+ msg = _gss_negoex_locate_exchange_message(messages, nmessages, AP_REQUEST);
+ if (msg == NULL) {
+ /*
+ * No input token is okay on the first request or if the mech is
+ * complete.
+ */
+ if (ctx->negoex_step == 1 ||
+ HEIM_TAILQ_FIRST(&ctx->negoex_mechs)->complete)
+ return GSS_S_COMPLETE;
+ *minor = (OM_uint32)NEGOEX_MISSING_AP_REQUEST_MESSAGE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (ctx->negoex_step == 1) {
+ /*
+ * Ignore the optimistic token if it isn't for our most preferred
+ * mech.
+ */
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ if (!GUID_EQ(msg->scheme, mech->scheme)) {
+ _gss_mg_log(10, "negoex ignored optimistic token as not for preferred mech");
+ return GSS_S_COMPLETE;
+ }
+ } else {
+ /* The initiator has selected a mech; discard other entries. */
+ mech = _gss_negoex_locate_auth_scheme(ctx, msg->scheme);
+ if (mech == NULL) {
+ *minor = (OM_uint32)NEGOEX_NO_AVAILABLE_MECHS;
+ return GSS_S_FAILURE;
+ }
+ _gss_negoex_select_auth_mech(ctx, mech);
+ }
+
+ if (mech->complete)
+ return GSS_S_COMPLETE;
+
+ if (ctx->mech_src_name != GSS_C_NO_NAME)
+ gss_release_name(&tmpMinor, &ctx->mech_src_name);
+ if (deleg_cred && *deleg_cred != GSS_C_NO_CREDENTIAL)
+ gss_release_cred(&tmpMinor, deleg_cred);
+
+ major = gss_accept_sec_context(minor, &mech->mech_context, cred,
+ &msg->token, input_chan_bindings,
+ &ctx->mech_src_name, &ctx->negotiated_mech_type,
+ output_token, &ctx->mech_flags,
+ &ctx->mech_time_rec, deleg_cred);
+ if (major == GSS_S_COMPLETE)
+ mech->complete = 1;
+
+ if (!GSS_ERROR(major)) {
+ if (major == GSS_S_COMPLETE &&
+ !gss_oid_equal(ctx->negotiated_mech_type, mech->oid))
+ _gss_mg_log(1, "negoex client didn't send the mech they said they would");
+
+ major = get_session_keys(minor, context, NEGOEX_BOTH_KEYS, mech);
+ } else if (ctx->negoex_step == 1) {
+ gss_mg_collect_error(ctx->negotiated_mech_type, major, *minor);
+ *mech_error = TRUE;
+
+ /* This was an optimistic token; pretend this never happened. */
+ major = GSS_S_COMPLETE;
+ *minor = 0;
+ gss_release_buffer(&tmpMinor, output_token);
+ gss_delete_sec_context(&tmpMinor, &mech->mech_context, GSS_C_NO_BUFFER);
+ }
+
+ return major;
+}
+
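+/*
+ * As the helpers below compute it, the initiator signs (and the acceptor
+ * verifies) with NEGOEX_KEYUSAGE_ACCEPTOR_CHECKSUM, while the acceptor signs
+ * (and the initiator verifies) with NEGOEX_KEYUSAGE_INITIATOR_CHECKSUM;
+ * hence the remark below about the spec naming.
+ */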
+static krb5_keyusage
+verify_keyusage(gssspnego_ctx ctx, int make_checksum)
+{
+ /* Of course, these are the wrong way around in the spec. */
+ return (ctx->flags.local ^ !make_checksum) ?
+ NEGOEX_KEYUSAGE_ACCEPTOR_CHECKSUM : NEGOEX_KEYUSAGE_INITIATOR_CHECKSUM;
+}
+
+static OM_uint32
+verify_key_flags(gssspnego_ctx ctx, int make_checksum)
+{
+ return (ctx->flags.local ^ make_checksum) ?
+ NEGOEX_SIGN_KEY : NEGOEX_VERIFY_KEY;
+}
+
+static OM_uint32
+verify_checksum(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ struct negoex_message *messages,
+ size_t nmessages,
+ gss_const_buffer_t input_token,
+ int *send_alert_out)
+{
+ krb5_error_code ret;
+ struct negoex_auth_mech *mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ struct verify_message *msg;
+ krb5_context context = _gss_mg_krb5_context();
+ krb5_crypto_iov iov[3];
+ krb5_keyusage usage = verify_keyusage(ctx, FALSE);
+
+ *send_alert_out = FALSE;
+ heim_assert(mech != NULL, "Invalid null mech when verifying NegoEx checksum");
+
+ /*
+ * The other party may not be ready to send a verify token yet, or (in the
+ * first initiator step) may send one for a mechanism we don't support.
+ */
+ msg = _gss_negoex_locate_verify_message(messages, nmessages);
+ if (msg == NULL || !GUID_EQ(msg->scheme, mech->scheme))
+ return GSS_S_COMPLETE;
+
+ /*
+ * Last-chance attempt to obtain the session key for re-imported (exported)
+ * partial contexts, which do not carry the session key at the NegoEx layer.
+ */
+ if (mech->verify_crypto == NULL)
+ get_session_keys(minor, context, verify_key_flags(ctx, FALSE), mech);
+
+ /*
+ * A recoverable error may cause us to be unable to verify a token from the
+ * other party. In this case we should send an alert.
+ */
+ if (mech->verify_crypto == NULL) {
+ *send_alert_out = TRUE;
+ return GSS_S_COMPLETE;
+ }
+
+ if (!krb5_checksum_is_keyed(context, msg->cksum_type)) {
+ *minor = (OM_uint32)NEGOEX_INVALID_CHECKSUM;
+ return GSS_S_BAD_SIG;
+ }
+
+ /*
+ * Verify the checksum over the existing transcript and the portion of the
+ * input token leading up to the verify message.
+ */
+ iov[0].flags = KRB5_CRYPTO_TYPE_DATA;
+ ret = krb5_storage_to_data(ctx->negoex_transcript, &iov[0].data);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_FAILURE;
+ }
+
+ iov[1].flags = KRB5_CRYPTO_TYPE_DATA;
+ iov[1].data.data = input_token->value;
+ iov[1].data.length = msg->offset_in_token;
+
+ iov[2].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
+ iov[2].data.data = (uint8_t *)msg->cksum;
+ iov[2].data.length = msg->cksum_len;
+
+ ret = krb5_verify_checksum_iov(context, mech->verify_crypto, usage,
+ iov, sizeof(iov) / sizeof(iov[0]), NULL);
+ if (ret == 0)
+ mech->verified_checksum = TRUE;
+ else
+ *minor = ret;
+
+ krb5_data_free(&iov[0].data);
+
+ return (ret == 0) ? GSS_S_COMPLETE : GSS_S_FAILURE;
+}
+
+static OM_uint32
+make_checksum(OM_uint32 *minor, gssspnego_ctx ctx)
+{
+ krb5_error_code ret;
+ krb5_context context = _gss_mg_krb5_context();
+ krb5_data d;
+ krb5_keyusage usage = verify_keyusage(ctx, TRUE);
+ krb5_checksum cksum;
+ struct negoex_auth_mech *mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ OM_uint32 major;
+
+ heim_assert(mech != NULL, "Invalid null mech when making NegoEx checksum");
+
+ if (mech->crypto == NULL) {
+ if (mech->complete) {
+ /*
+ * Last-chance attempt to obtain the session key for re-imported (exported)
+ * partial contexts, which do not carry the session key at the NegoEx layer.
+ */
+ get_session_keys(minor, context, verify_key_flags(ctx, TRUE), mech);
+ if (mech->crypto == NULL) {
+ *minor = (OM_uint32)NEGOEX_NO_VERIFY_KEY;
+ return GSS_S_UNAVAILABLE;
+ }
+ } else {
+ return GSS_S_COMPLETE;
+ }
+ }
+
+ ret = krb5_storage_to_data(ctx->negoex_transcript, &d);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_FAILURE;
+ }
+
+ ret = krb5_create_checksum(context, mech->crypto,
+ usage, 0, d.data, d.length, &cksum);
+ krb5_data_free(&d);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_FAILURE;
+ }
+
+ major = _gss_negoex_add_verify_message(minor, ctx, mech->scheme,
+ cksum.cksumtype,
+ cksum.checksum.data,
+ cksum.checksum.length);
+ free_Checksum(&cksum);
+
+ if (major == GSS_S_COMPLETE)
+ mech->sent_checksum = TRUE;
+
+ return major;
+}
+
+/*
+ * If the other side sent a VERIFY_NO_KEY pulse alert, clear the checksum state
+ * on the mechanism so that we send another VERIFY message.
+ */
+static void
+process_alerts(gssspnego_ctx ctx,
+ struct negoex_message *messages,
+ uint32_t nmessages)
+{
+ struct alert_message *msg;
+ struct negoex_auth_mech *mech;
+
+ msg = _gss_negoex_locate_alert_message(messages, nmessages);
+ if (msg != NULL && msg->verify_no_key) {
+ mech = _gss_negoex_locate_auth_scheme(ctx, msg->scheme);
+ if (mech != NULL)
+ release_mech_crypto(mech);
+ }
+}
+
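+/*
+ * Append this leg's outgoing NegoEx messages (NEGO on the first leg, then
+ * any EXCHANGE, alert and VERIFY messages) to the transcript and return a
+ * copy of the newly appended bytes as the output token.
+ */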
+static OM_uint32
+make_output_token(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_buffer_t mech_output_token,
+ int send_alert,
+ gss_buffer_t output_token)
+{
+ OM_uint32 major, tmpMinor;
+ struct negoex_auth_mech *mech;
+ enum message_type type;
+ off_t old_transcript_len;
+
+ output_token->length = 0;
+ output_token->value = NULL;
+
+ old_transcript_len = krb5_storage_seek(ctx->negoex_transcript, 0, SEEK_CUR);
+
+ /*
+ * If the mech is complete and we previously sent a checksum, we just
+ * processed the last leg and don't need to send another token.
+ */
+ if (mech_output_token->length == 0 &&
+ HEIM_TAILQ_FIRST(&ctx->negoex_mechs)->sent_checksum)
+ return GSS_S_COMPLETE;
+
+ if (ctx->negoex_step == 1) {
+ if (ctx->flags.local)
+ major = emit_initiator_nego(minor, ctx);
+ else
+ major = emit_acceptor_nego(minor, ctx);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ type = ctx->flags.local ? INITIATOR_META_DATA : ACCEPTOR_META_DATA;
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links) {
+ if (mech->metadata.length > 0) {
+ major = _gss_negoex_add_exchange_message(minor, ctx,
+ type, mech->scheme,
+ &mech->metadata);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+ }
+ }
+
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+
+ if (mech_output_token->length > 0) {
+ type = ctx->flags.local ? AP_REQUEST : CHALLENGE;
+ major = _gss_negoex_add_exchange_message(minor, ctx,
+ type, mech->scheme,
+ mech_output_token);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+
+ if (send_alert) {
+ major = _gss_negoex_add_verify_no_key_alert(minor, ctx, mech->scheme);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+
+ /* Try to add a VERIFY message if we haven't already done so. */
+ if (!mech->sent_checksum) {
+ major = make_checksum(minor, ctx);
+ if (major != GSS_S_COMPLETE)
+ return major;
+ }
+
+ heim_assert(ctx->negoex_transcript != NULL, "NegoEx context uninitialized");
+
+ output_token->length =
+ krb5_storage_seek(ctx->negoex_transcript, 0, SEEK_CUR) - old_transcript_len;
+ output_token->value = malloc(output_token->length);
+ if (output_token->value == NULL) {
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ krb5_storage_seek(ctx->negoex_transcript, old_transcript_len, SEEK_SET);
+
+ if (krb5_storage_read(ctx->negoex_transcript,
+ output_token->value,
+ output_token->length) != output_token->length) {
+ *minor = ERANGE;
+ gss_release_buffer(&tmpMinor, output_token);
+ return GSS_S_FAILURE;
+ }
+
+ krb5_storage_seek(ctx->negoex_transcript, 0, SEEK_END);
+
+ return GSS_S_COMPLETE;
+}
+
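+/*
+ * Run one initiator leg of NegoEx on behalf of SPNEGO: parse the incoming
+ * token (if any), drive metadata exchange and the underlying mechanism, and
+ * build the next NegoEx token. Returns GSS_S_CONTINUE_NEEDED until the
+ * selected mechanism is complete and its VERIFY checksum has been checked.
+ */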
+OM_uint32
+_gss_negoex_init(OM_uint32 *minor,
+ struct gssspnego_optimistic_ctx *opt,
+ gssspnego_ctx ctx,
+ gss_cred_id_t cred,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_const_buffer_t input_token,
+ gss_buffer_t output_token)
+{
+ OM_uint32 major, tmpMinor;
+ gss_buffer_desc mech_output_token = GSS_C_EMPTY_BUFFER;
+ struct negoex_message *messages = NULL;
+ struct negoex_auth_mech *mech;
+ size_t nmessages = 0;
+ int send_alert = FALSE, mech_error = FALSE;
+
+ output_token->length = 0;
+ output_token->value = NULL;
+
+ if (ctx->negoex_step == 0 && input_token != GSS_C_NO_BUFFER &&
+ input_token->length != 0)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ major = _gss_negoex_begin(minor, ctx);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ ctx->negoex_step++;
+
+ if (input_token != GSS_C_NO_BUFFER && input_token->length > 0) {
+ major = _gss_negoex_parse_token(minor, ctx, input_token,
+ &messages, &nmessages);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+ }
+
+ process_alerts(ctx, messages, nmessages);
+
+ if (ctx->negoex_step == 1) {
+ /* Choose a random conversation ID. */
+ krb5_generate_random_block(ctx->negoex_conv_id, GUID_LENGTH);
+
+ /* Query each mech for its metadata (this may prune the mech list). */
+ query_meta_data(ctx, opt, cred, req_flags);
+ } else if (ctx->negoex_step == 2) {
+ /* See if the mech processed the optimistic token. */
+ check_optimistic_result(ctx, messages, nmessages);
+
+ /* Pass the acceptor metadata to each mech to prune the list. */
+ exchange_meta_data(ctx, cred, req_flags, messages, nmessages);
+
+ /* Process the ACCEPTOR_NEGO message. */
+ major = process_acceptor_nego(minor, ctx, messages, nmessages);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+ }
+
+ /*
+ * Process the input token and/or produce an output token. This may prune
+ * the mech list, but on success there will be at least one mech entry.
+ */
+ major = mech_init(minor, opt, ctx, cred, req_flags, time_req,
+ input_chan_bindings, messages, nmessages,
+ &mech_output_token, &mech_error);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+ heim_assert(!HEIM_TAILQ_EMPTY(&ctx->negoex_mechs),
+ "Invalid empty NegoEx mechanism list");
+
+ /*
+ * At this point in step 2 we have performed the metadata exchange and
+ * chosen a mech we can use, so discard any fallback mech entries.
+ */
+ if (ctx->negoex_step == 2)
+ _gss_negoex_select_auth_mech(ctx, HEIM_TAILQ_FIRST(&ctx->negoex_mechs));
+
+ major = verify_checksum(minor, ctx, messages, nmessages, input_token,
+ &send_alert);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ if (input_token != GSS_C_NO_BUFFER) {
+ if (krb5_storage_write(ctx->negoex_transcript,
+ input_token->value,
+ input_token->length) != input_token->length) {
+ major = GSS_S_FAILURE;
+ *minor = ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ major = make_output_token(minor, ctx, &mech_output_token, send_alert,
+ output_token);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ major = (mech->complete && mech->verified_checksum) ? GSS_S_COMPLETE :
+ GSS_S_CONTINUE_NEEDED;
+
+cleanup:
+ free(messages);
+ gss_release_buffer(&tmpMinor, &mech_output_token);
+ _gss_negoex_end(ctx);
+
+ if (GSS_ERROR(major)) {
+ if (!mech_error) {
+ krb5_context context = _gss_mg_krb5_context();
+
+ gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ major, *minor,
+ "NegoEx failed to initialize security context: %s",
+ krb5_get_error_message(context, *minor));
+ }
+
+ _gss_negoex_release_context(ctx);
+ }
+
+ return major;
+}
+
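+/*
+ * Run one acceptor leg of NegoEx on behalf of SPNEGO: parse the incoming
+ * token, prune the mech list using the NEGO and metadata messages, step the
+ * selected mechanism and build the next NegoEx token.
+ */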
+OM_uint32
+_gss_negoex_accept(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_cred_id_t cred,
+ gss_const_buffer_t input_token,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_buffer_t output_token,
+ gss_cred_id_t *deleg_cred)
+{
+ OM_uint32 major, tmpMinor;
+ gss_buffer_desc mech_output_token = GSS_C_EMPTY_BUFFER;
+ struct negoex_message *messages = NULL;
+ struct negoex_auth_mech *mech;
+ size_t nmessages;
+ int send_alert = FALSE, mech_error = FALSE;
+
+ output_token->length = 0;
+ output_token->value = NULL;
+ if (deleg_cred)
+ *deleg_cred = GSS_C_NO_CREDENTIAL;
+
+ if (input_token == GSS_C_NO_BUFFER || input_token->length == 0) {
+ major = GSS_S_DEFECTIVE_TOKEN;
+ goto cleanup;
+ }
+
+ major = _gss_negoex_begin(minor, ctx);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ ctx->negoex_step++;
+
+ major = _gss_negoex_parse_token(minor, ctx, input_token,
+ &messages, &nmessages);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ process_alerts(ctx, messages, nmessages);
+
+ if (ctx->negoex_step == 1) {
+ /*
+ * Read the INITIATOR_NEGO message to prune the candidate mech list.
+ */
+ major = process_initiator_nego(minor, ctx, messages, nmessages);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ /*
+ * Pass the initiator metadata to each mech to prune the list, and
+ * query each mech for its acceptor metadata (which may also prune the
+ * list).
+ */
+ exchange_meta_data(ctx, cred, 0, messages, nmessages);
+ query_meta_data(ctx, NULL, cred, 0);
+
+ if (HEIM_TAILQ_EMPTY(&ctx->negoex_mechs)) {
+ *minor = (OM_uint32)NEGOEX_NO_AVAILABLE_MECHS;
+ major = GSS_S_FAILURE;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Process the input token and possibly produce an output token. This may
+ * prune the list to a single mech. Continue on error if an output token
+ * is generated, so that we send the token to the initiator.
+ */
+ major = mech_accept(minor, ctx, cred, input_chan_bindings,
+ messages, nmessages, &mech_output_token,
+ deleg_cred, &mech_error);
+ if (major != GSS_S_COMPLETE && mech_output_token.length == 0)
+ goto cleanup;
+
+ if (major == GSS_S_COMPLETE) {
+ major = verify_checksum(minor, ctx, messages, nmessages, input_token,
+ &send_alert);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+ }
+
+ if (krb5_storage_write(ctx->negoex_transcript,
+ input_token->value,
+ input_token->length) != input_token->length) {
+ major = GSS_S_FAILURE;
+ *minor = ENOMEM;
+ goto cleanup;
+ }
+
+ major = make_output_token(minor, ctx, &mech_output_token, send_alert,
+ output_token);
+ if (major != GSS_S_COMPLETE)
+ goto cleanup;
+
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ major = (mech->complete && mech->verified_checksum) ? GSS_S_COMPLETE :
+ GSS_S_CONTINUE_NEEDED;
+
+cleanup:
+ free(messages);
+ gss_release_buffer(&tmpMinor, &mech_output_token);
+ _gss_negoex_end(ctx);
+
+ if (GSS_ERROR(major)) {
+ if (!mech_error) {
+ krb5_context context = _gss_mg_krb5_context();
+
+ gss_mg_set_error_string(GSS_SPNEGO_MECHANISM,
+ major, *minor,
+ "NegoEx failed to accept security context: %s",
+ krb5_get_error_message(context, *minor));
+ }
+
+ _gss_negoex_release_context(ctx);
+ }
+
+ return major;
+}
diff --git a/third_party/heimdal/lib/gssapi/spnego/negoex_err.et b/third_party/heimdal/lib/gssapi/spnego/negoex_err.et
new file mode 100644
index 00000000000..99a8a2ec379
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/negoex_err.et
@@ -0,0 +1,25 @@
+#
+# NegoEx error messages
+#
+
+id "$Id$"
+
+error_table ngex
+
+prefix NEGOEX
+
+error_code INVALID_MESSAGE_SIGNATURE, "Invalid NegoEx signature"
+error_code INVALID_MESSAGE_TYPE, "Invalid NegoEx message type"
+error_code INVALID_MESSAGE_SIZE, "Invalid NegoEx message size"
+error_code INVALID_CONVERSATION_ID, "Invalid NegoEx conversation ID"
+error_code AUTH_SCHEME_NOT_FOUND, "NegoEx authentication scheme not found"
+error_code MISSING_NEGO_MESSAGE, "Missing NegoEx negotiate message"
+error_code MISSING_AP_REQUEST_MESSAGE, "Missing NegoEx authentication protocol request message"
+error_code NO_AVAILABLE_MECHS, "No mutually supported NegoEx authentication schemes"
+error_code NO_VERIFY_KEY, "No NegoEx verify key"
+error_code UNKNOWN_CHECKSUM_SCHEME, "Unknown NegoEx checksum scheme"
+error_code INVALID_CHECKSUM, "Invalid NegoEx checksum"
+error_code UNSUPPORTED_CRITICAL_EXTENSION, "Unsupported critical NegoEx extension"
+error_code UNSUPPORTED_VERSION, "Unsupported NegoEx version"
+error_code MESSAGE_OUT_OF_SEQUENCE, "NegoEx message out of sequence"
+
diff --git a/third_party/heimdal/lib/gssapi/spnego/negoex_locl.h b/third_party/heimdal/lib/gssapi/spnego/negoex_locl.h
new file mode 100644
index 00000000000..3e0d29a31d8
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/negoex_locl.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011-2019 PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef NEGOEX_LOCL_H
+#define NEGOEX_LOCL_H
+
+#include <negoex_err.h>
+
+struct gssspnego_ctx_desc;
+
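+/* The message signature is the ASCII string "NEGOEXTS" stored little-endian. */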
+#define MESSAGE_SIGNATURE 0x535458454F47454EULL
+
+#define EXTENSION_LENGTH 12
+
+#define EXTENSION_FLAG_CRITICAL 0x80000000
+
+#define CHECKSUM_SCHEME_RFC3961 1
+
+#define NEGOEX_KEYUSAGE_INITIATOR_CHECKSUM 23
+#define NEGOEX_KEYUSAGE_ACCEPTOR_CHECKSUM 25
+
+#define CHECKSUM_HEADER_LENGTH 20
+
+#define GUID_LENGTH 16
+
+typedef uint8_t auth_scheme[GUID_LENGTH];
+typedef uint8_t conversation_id[GUID_LENGTH];
+#define GUID_EQ(a, b) (memcmp(a, b, GUID_LENGTH) == 0)
+
+#define NEGO_MESSAGE_HEADER_LENGTH 96
+#define EXCHANGE_MESSAGE_HEADER_LENGTH 64
+#define VERIFY_MESSAGE_HEADER_LENGTH 80
+#define ALERT_MESSAGE_HEADER_LENGTH 72
+#define ALERT_LENGTH 12
+#define ALERT_PULSE_LENGTH 8
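+
+/*
+ * For reference: the header lengths above consist of the 40-byte common
+ * MESSAGE_HEADER (signature, type, sequence number, header length, message
+ * length, conversation ID) plus each message's fixed fields and padding as
+ * written by the encoders in negoex_util.c; vector offsets are relative to
+ * the start of the message.
+ */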
+
+#define ALERT_TYPE_PULSE 1
+#define ALERT_VERIFY_NO_KEY 1
+
+enum message_type {
+ INITIATOR_NEGO = 0, /* NEGO_MESSAGE */
+ ACCEPTOR_NEGO, /* NEGO_MESSAGE */
+ INITIATOR_META_DATA, /* EXCHANGE_MESSAGE */
+ ACCEPTOR_META_DATA, /* EXCHANGE_MESSAGE */
+ CHALLENGE, /* EXCHANGE_MESSAGE */
+ AP_REQUEST, /* EXCHANGE_MESSAGE */
+ VERIFY, /* VERIFY_MESSAGE */
+ ALERT, /* ALERT */
+};
+
+struct nego_message {
+ uint8_t random[32];
+ const uint8_t *schemes;
+ uint16_t nschemes;
+};
+
+struct exchange_message {
+ auth_scheme scheme;
+ gss_buffer_desc token;
+};
+
+struct verify_message {
+ auth_scheme scheme;
+ uint32_t cksum_type;
+ const uint8_t *cksum;
+ size_t cksum_len;
+ size_t offset_in_token;
+};
+
+struct alert_message {
+ auth_scheme scheme;
+ int verify_no_key;
+};
+
+struct negoex_message {
+ uint32_t type;
+ union {
+ struct nego_message n;
+ struct exchange_message e;
+ struct verify_message v;
+ struct alert_message a;
+ } u;
+};
+
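+/*
+ * Per-mechanism NegoEx state: 'crypto' holds the key used to generate our
+ * VERIFY checksums, 'verify_crypto' the key used to verify the peer's, and
+ * 'metadata' the mechanism's metadata token.
+ */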
+struct negoex_auth_mech {
+ HEIM_TAILQ_ENTRY(negoex_auth_mech) links;
+ gss_OID oid;
+ auth_scheme scheme;
+ gss_ctx_id_t mech_context;
+ gss_buffer_desc metadata;
+ krb5_crypto crypto;
+ krb5_crypto verify_crypto;
+ int complete;
+ int sent_checksum;
+ int verified_checksum;
+};
+
+#define NEGOEX_LOG_LEVEL 10
+
+#endif /* NEGOEX_LOCL_H */
diff --git a/third_party/heimdal/lib/gssapi/spnego/negoex_util.c b/third_party/heimdal/lib/gssapi/spnego/negoex_util.c
new file mode 100644
index 00000000000..aac09d4483b
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/negoex_util.c
@@ -0,0 +1,1042 @@
+/*
+ * Copyright (C) 2011-2019 PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spnego_locl.h"
+
+/*
+ * SPNEGO expects to find the active mech context in ctx->negotiated_ctx_id,
+ * but the metadata exchange APIs force us to have one mech context per mech
+ * entry. To address this mismatch, move the active mech context (if we have
+ * one) to ctx->negotiated_ctx_id at the end of NegoEx processing.
+ */
+void
+_gss_negoex_end(gssspnego_ctx ctx)
+{
+ struct negoex_auth_mech *mech;
+
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ if (mech == NULL || mech->mech_context == GSS_C_NO_CONTEXT)
+ return;
+
+ heim_assert(ctx->negotiated_ctx_id == GSS_C_NO_CONTEXT,
+ "SPNEGO/NegoEx context mismatch");
+ ctx->negotiated_ctx_id = mech->mech_context;
+ mech->mech_context = GSS_C_NO_CONTEXT;
+}
+
+OM_uint32
+_gss_negoex_begin(OM_uint32 *minor, gssspnego_ctx ctx)
+{
+ struct negoex_auth_mech *mech;
+
+ if (ctx->negoex_transcript != NULL) {
+ /*
+ * The context is already initialized for NegoEx; undo what
+ * _gss_negoex_end() did, if applicable.
+ */
+ if (ctx->negotiated_ctx_id != GSS_C_NO_CONTEXT) {
+ mech = HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+ heim_assert(mech != NULL && mech->mech_context == GSS_C_NO_CONTEXT,
+ "NegoEx/SPNEGO context mismatch");
+ mech->mech_context = ctx->negotiated_ctx_id;
+ ctx->negotiated_ctx_id = GSS_C_NO_CONTEXT;
+ }
+ return GSS_S_COMPLETE;
+ }
+
+ ctx->negoex_transcript = krb5_storage_emem();
+ if (ctx->negoex_transcript == NULL) {
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ krb5_storage_set_byteorder(ctx->negoex_transcript,
+ KRB5_STORAGE_BYTEORDER_LE);
+
+ return GSS_S_COMPLETE;
+}
+
+static void
+release_all_mechs(gssspnego_ctx ctx, krb5_context context)
+{
+ struct negoex_auth_mech *mech, *next;
+
+ HEIM_TAILQ_FOREACH_SAFE(mech, &ctx->negoex_mechs, links, next) {
+ _gss_negoex_release_auth_mech(context, mech);
+ }
+
+ HEIM_TAILQ_INIT(&ctx->negoex_mechs);
+}
+
+void
+_gss_negoex_release_context(gssspnego_ctx ctx)
+{
+ krb5_context context = _gss_mg_krb5_context();
+
+ if (ctx->negoex_transcript != NULL) {
+ krb5_storage_free(ctx->negoex_transcript);
+ ctx->negoex_transcript = NULL;
+ }
+
+ release_all_mechs(ctx, context);
+}
+
+static int
+guid_to_string(const uint8_t guid[16], char *buffer, size_t bufsiz)
+{
+ uint32_t data1;
+ uint16_t data2, data3;
+
+ _gss_mg_decode_le_uint32(&guid[0], &data1);
+ _gss_mg_decode_le_uint16(&guid[4], &data2);
+ _gss_mg_decode_le_uint16(&guid[6], &data3);
+
+ return snprintf(buffer, bufsiz,
+ "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ data1, data2, data3, guid[8], guid[9], guid[10], guid[11],
+ guid[12], guid[13], guid[14], guid[15]);
+}
+
+void
+_gss_negoex_log_auth_scheme(int initiator,
+ int index,
+ const auth_scheme scheme)
+{
+ char scheme_str[37];
+
+ guid_to_string(scheme, scheme_str, sizeof(scheme_str));
+
+ _gss_mg_log(NEGOEX_LOG_LEVEL,
+ "negoex: %s authentication scheme %d %s",
+ initiator ? "proposing" : "received", index, scheme_str);
+}
+
+void
+_gss_negoex_log_message(int direction,
+ enum message_type type,
+ const conversation_id conv_id,
+ unsigned int seqnum,
+ unsigned int header_len,
+ unsigned int msg_len)
+{
+ char conv_str[37];
+ char *typestr;
+
+ if (type == INITIATOR_NEGO)
+ typestr = "INITIATOR_NEGO";
+ else if (type == ACCEPTOR_NEGO)
+ typestr = "ACCEPTOR_NEGO";
+ else if (type == INITIATOR_META_DATA)
+ typestr = "INITIATOR_META_DATA";
+ else if (type == ACCEPTOR_META_DATA)
+ typestr = "ACCEPTOR_META_DATA";
+ else if (type == CHALLENGE)
+ typestr = "CHALLENGE";
+ else if (type == AP_REQUEST)
+ typestr = "AP_REQUEST";
+ else if (type == VERIFY)
+ typestr = "VERIFY";
+ else if (type == ALERT)
+ typestr = "ALERT";
+ else
+ typestr = "UNKNOWN";
+
+ guid_to_string(conv_id, conv_str, sizeof(conv_str));
+ _gss_mg_log(NEGOEX_LOG_LEVEL,
+ "negoex: %s (%d)%s conversation %s",
+ direction ? "received" : "sending",
+ seqnum, typestr, conv_str);
+}
+
+/*
+ * Check that the described vector lies within the message, and return a
+ * pointer to its first element.
+ */
+static inline const uint8_t *
+vector_base(size_t offset, size_t count, size_t width,
+ const uint8_t *msg_base, size_t msg_len)
+{
+ if (offset > msg_len || count > (msg_len - offset) / width)
+ return NULL;
+ return msg_base + offset;
+}
+
+static OM_uint32
+parse_nego_message(OM_uint32 *minor, krb5_storage *sp,
+ const uint8_t *msg_base, size_t msg_len,
+ struct nego_message *msg)
+{
+ krb5_error_code ret;
+ const uint8_t *p;
+ uint64_t protocol_version;
+ uint32_t extension_type, offset;
+ uint16_t count;
+ size_t i;
+
+ if (krb5_storage_read(sp, msg->random,
+ sizeof(msg->random)) != sizeof(msg->random)) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ ret = krb5_ret_uint64(sp, &protocol_version);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (protocol_version != 0) {
+ *minor = (OM_uint32)NEGOEX_UNSUPPORTED_VERSION;
+ return GSS_S_UNAVAILABLE;
+ }
+
+ ret = krb5_ret_uint32(sp, &offset);
+ if (ret == 0)
+ ret = krb5_ret_uint16(sp, &count);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ msg->schemes = vector_base(offset, count, GUID_LENGTH, msg_base, msg_len);
+ msg->nschemes = count;
+ if (msg->schemes == NULL) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ ret = krb5_ret_uint32(sp, &offset);
+ if (ret == 0)
+ ret = krb5_ret_uint16(sp, &count);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ p = vector_base(offset, count, EXTENSION_LENGTH, msg_base, msg_len);
+ if (p == NULL && count > 0) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ for (i = 0; i < count; i++) {
+ _gss_mg_decode_le_uint32(p + i * EXTENSION_LENGTH, &extension_type);
+ if (extension_type & EXTENSION_FLAG_CRITICAL) {
+ *minor = (OM_uint32)NEGOEX_UNSUPPORTED_CRITICAL_EXTENSION;
+ return GSS_S_UNAVAILABLE;
+ }
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+parse_exchange_message(OM_uint32 *minor, krb5_storage *sp,
+ const uint8_t *msg_base, size_t msg_len,
+ struct exchange_message *msg)
+{
+ krb5_error_code ret;
+ const uint8_t *p;
+ uint32_t offset;
+ uint16_t len;
+
+ if (krb5_storage_read(sp, msg->scheme, GUID_LENGTH) != GUID_LENGTH) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ ret = krb5_ret_uint32(sp, &offset);
+ if (ret == 0)
+ ret = krb5_ret_uint16(sp, &len);
+ if (ret) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ p = vector_base(offset, len, 1, msg_base, msg_len);
+ if (p == NULL) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ msg->token.value = (void *)p;
+ msg->token.length = len;
+
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+parse_verify_message(OM_uint32 *minor, krb5_storage *sp,
+ const uint8_t *msg_base, size_t msg_len,
+ size_t token_offset, struct verify_message *msg)
+{
+ krb5_error_code ret;
+ uint32_t hdrlen, cksum_scheme;
+ uint32_t offset, len;
+
+ if (krb5_storage_read(sp, msg->scheme, GUID_LENGTH) == GUID_LENGTH)
+ ret = 0;
+ else
+ ret = NEGOEX_INVALID_MESSAGE_SIZE;
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &hdrlen);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (hdrlen != CHECKSUM_HEADER_LENGTH) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ ret = krb5_ret_uint32(sp, &cksum_scheme);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &msg->cksum_type);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (cksum_scheme != CHECKSUM_SCHEME_RFC3961) {
+ *minor = (OM_uint32)NEGOEX_UNKNOWN_CHECKSUM_SCHEME;
+ return GSS_S_UNAVAILABLE;
+ }
+
+ ret = krb5_ret_uint32(sp, &offset);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &len);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ msg->cksum = vector_base(offset, len, 1, msg_base, msg_len);
+ msg->cksum_len = len;
+ if (msg->cksum == NULL) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ msg->offset_in_token = token_offset;
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+storage_from_memory(OM_uint32 *minor,
+ const uint8_t *data,
+ size_t length,
+ krb5_storage **sp)
+{
+ *sp = krb5_storage_from_readonly_mem(data, length);
+ if (*sp == NULL) {
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ krb5_storage_set_byteorder(*sp, KRB5_STORAGE_BYTEORDER_LE);
+ krb5_storage_set_eof_code(*sp, NEGOEX_INVALID_MESSAGE_SIZE);
+
+ return GSS_S_COMPLETE;
+}
+
+static OM_uint32
+parse_alert_message(OM_uint32 *minor, krb5_storage *sp,
+ const uint8_t *msg_base, size_t msg_len,
+ struct alert_message *msg)
+{
+ OM_uint32 major;
+ krb5_error_code ret;
+ const uint8_t *p;
+ uint32_t error_code, atype;
+ uint32_t alerts_offset, nalerts, value_offset, value_len;
+ size_t i;
+ krb5_storage *alerts;
+
+ if (krb5_storage_read(sp, msg->scheme, GUID_LENGTH) == GUID_LENGTH)
+ ret = 0;
+ else
+ ret = NEGOEX_INVALID_MESSAGE_SIZE;
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &error_code);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ ret = krb5_ret_uint32(sp, &alerts_offset);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &nalerts);
+ if (ret) {
+ *minor = ret;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ p = vector_base(alerts_offset, nalerts, ALERT_LENGTH, msg_base, msg_len);
+ if (p == NULL) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ /* Look for a VERIFY_NO_KEY pulse alert in the alerts vector. */
+ msg->verify_no_key = FALSE;
+
+ major = storage_from_memory(minor, p, nalerts * ALERT_LENGTH, &alerts);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ for (i = 0; i < nalerts; i++) {
+ ret = krb5_ret_uint32(alerts, &atype);
+ if (ret == 0)
+ ret = krb5_ret_uint32(alerts, &value_offset);
+ if (ret == 0)
+ ret = krb5_ret_uint32(alerts, &value_len);
+ if (ret) {
+ *minor = ret;
+ major = GSS_S_DEFECTIVE_TOKEN;
+ break;
+ }
+
+ p = vector_base(value_offset, value_len, 1, msg_base, msg_len);
+ if (p == NULL) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ major = GSS_S_DEFECTIVE_TOKEN;
+ break;
+ }
+
+ if (atype == ALERT_TYPE_PULSE && value_len >= ALERT_PULSE_LENGTH) {
+ krb5_storage *pulse;
+ uint32_t hdrlen, reason;
+
+ major = storage_from_memory(minor, p, value_len, &pulse);
+ if (major != GSS_S_COMPLETE)
+ break;
+
+ ret = krb5_ret_uint32(pulse, &hdrlen);
+ if (ret == 0)
+ ret = krb5_ret_uint32(pulse, &reason);
+ krb5_storage_free(pulse);
+ if (ret) {
+ *minor = ret;
+ major = GSS_S_DEFECTIVE_TOKEN;
+ break;
+ }
+
+ if (reason == ALERT_VERIFY_NO_KEY)
+ msg->verify_no_key = TRUE;
+ }
+ }
+
+ krb5_storage_free(alerts);
+
+ return major;
+}
+
+static OM_uint32
+parse_message(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_const_buffer_t token,
+ size_t *token_offset,
+ struct negoex_message *msg)
+{
+ OM_uint32 major;
+ krb5_error_code ret;
+ krb5_storage *sp;
+ uint64_t signature;
+ uint32_t header_len, msg_len;
+ uint32_t type, seqnum;
+ conversation_id conv_id;
+ size_t token_remaining = token->length - *token_offset;
+ const uint8_t *msg_base = (uint8_t *)token->value + *token_offset;
+
+ major = storage_from_memory(minor, msg_base, token_remaining, &sp);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ major = GSS_S_DEFECTIVE_TOKEN;
+
+ ret = krb5_ret_uint64(sp, &signature);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &type);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &seqnum);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &header_len);
+ if (ret == 0)
+ ret = krb5_ret_uint32(sp, &msg_len);
+ if (ret == 0) {
+ if (krb5_storage_read(sp, conv_id, GUID_LENGTH) != GUID_LENGTH)
+ ret = NEGOEX_INVALID_MESSAGE_SIZE;
+ }
+ if (ret) {
+ *minor = ret;
+ goto cleanup;
+ }
+
+ if (msg_len > token_remaining || header_len > msg_len) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ goto cleanup;
+ }
+ if (signature != MESSAGE_SIGNATURE) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIGNATURE;
+ goto cleanup;
+ }
+ if (seqnum != ctx->negoex_seqnum) {
+ *minor = (OM_uint32)NEGOEX_MESSAGE_OUT_OF_SEQUENCE;
+ goto cleanup;
+ }
+ if (seqnum == 0) {
+ memcpy(ctx->negoex_conv_id, conv_id, GUID_LENGTH);
+ } else if (!GUID_EQ(conv_id, ctx->negoex_conv_id)) {
+ *minor = (OM_uint32)NEGOEX_INVALID_CONVERSATION_ID;
+ goto cleanup;
+ }
+
+ krb5_storage_truncate(sp, msg_len);
+
+ msg->type = type;
+ if (type == INITIATOR_NEGO || type == ACCEPTOR_NEGO) {
+ major = parse_nego_message(minor, sp, msg_base, msg_len, &msg->u.n);
+ } else if (type == INITIATOR_META_DATA || type == ACCEPTOR_META_DATA ||
+ type == CHALLENGE || type == AP_REQUEST) {
+ major = parse_exchange_message(minor, sp, msg_base, msg_len,
+ &msg->u.e);
+ } else if (type == VERIFY) {
+ major = parse_verify_message(minor, sp, msg_base, msg_len,
+ msg_base - (uint8_t *)token->value,
+ &msg->u.v);
+ } else if (type == ALERT) {
+ major = parse_alert_message(minor, sp, msg_base, msg_len, &msg->u.a);
+ } else {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_TYPE;
+ goto cleanup;
+ }
+
+cleanup:
+ krb5_storage_free(sp);
+
+ if (major == GSS_S_COMPLETE) {
+ _gss_negoex_log_message(1, msg->type,
+ ctx->negoex_conv_id, ctx->negoex_seqnum,
+ header_len, msg_len);
+ ctx->negoex_seqnum++;
+ *token_offset += msg_len;
+ }
+
+ return major;
+}
+
+/*
+ * Parse token into an array of negoex_message structures. All pointer fields
+ * within the parsed messages are aliases into token, so the result can be
+ * freed with free(). An unknown protocol version, a critical extension, or an
+ * unknown checksum scheme will cause a parsing failure. Increment the
+ * sequence number in ctx for each message, and record and check the
+ * conversation ID in ctx as appropriate.
+ */
+OM_uint32
+_gss_negoex_parse_token(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_const_buffer_t token,
+ struct negoex_message **messages_out,
+ size_t *count_out)
+{
+ OM_uint32 major = GSS_S_DEFECTIVE_TOKEN;
+ size_t count = 0;
+ size_t token_offset = 0;
+ struct negoex_message *messages = NULL, *newptr;
+
+ *messages_out = NULL;
+ *count_out = 0;
+ heim_assert(token != GSS_C_NO_BUFFER, "Invalid null NegoEx input token");
+
+ while (token_offset < token->length) {
+ newptr = realloc(messages, (count + 1) * sizeof(*newptr));
+ if (newptr == NULL) {
+ free(messages);
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+ messages = newptr;
+
+ major = parse_message(minor, ctx, token, &token_offset,
+ &messages[count]);
+ if (major != GSS_S_COMPLETE)
+ break;
+
+ count++;
+ }
+
+ if (token_offset != token->length) {
+ *minor = (OM_uint32)NEGOEX_INVALID_MESSAGE_SIZE;
+ major = GSS_S_DEFECTIVE_TOKEN;
+ }
+ if (major != GSS_S_COMPLETE) {
+ free(messages);
+ return major;
+ }
+
+ *messages_out = messages;
+ *count_out = count;
+ return GSS_S_COMPLETE;
+}
+
+static struct negoex_message *
+locate_message(struct negoex_message *messages, size_t nmessages,
+ enum message_type type)
+{
+ uint32_t i;
+
+ for (i = 0; i < nmessages; i++) {
+ if (messages[i].type == type)
+ return &messages[i];
+ }
+
+ return NULL;
+}
+
+struct nego_message *
+_gss_negoex_locate_nego_message(struct negoex_message *messages,
+ size_t nmessages,
+ enum message_type type)
+{
+ struct negoex_message *msg = locate_message(messages, nmessages, type);
+
+ return (msg == NULL) ? NULL : &msg->u.n;
+}
+
+struct exchange_message *
+_gss_negoex_locate_exchange_message(struct negoex_message *messages,
+ size_t nmessages,
+ enum message_type type)
+{
+ struct negoex_message *msg = locate_message(messages, nmessages, type);
+
+ return (msg == NULL) ? NULL : &msg->u.e;
+}
+
+struct verify_message *
+_gss_negoex_locate_verify_message(struct negoex_message *messages,
+ size_t nmessages)
+{
+ struct negoex_message *msg = locate_message(messages, nmessages, VERIFY);
+
+ return (msg == NULL) ? NULL : &msg->u.v;
+}
+
+struct alert_message *
+_gss_negoex_locate_alert_message(struct negoex_message *messages,
+ size_t nmessages)
+{
+ struct negoex_message *msg = locate_message(messages, nmessages, ALERT);
+
+ return (msg == NULL) ? NULL : &msg->u.a;
+}
+
+/*
+ * Add the encoding of a MESSAGE_HEADER structure to buf, given the number of
+ * bytes of the payload following the full header. Increment the sequence
+ * number in ctx. Set *payload_start_out to the position of the payload within
+ * the message.
+ */
+static OM_uint32
+put_message_header(OM_uint32 *minor, gssspnego_ctx ctx,
+ enum message_type type, uint32_t payload_len,
+ uint32_t *payload_start_out)
+{
+ krb5_error_code ret;
+ size_t header_len = 0;
+
+ if (type == INITIATOR_NEGO || type == ACCEPTOR_NEGO)
+ header_len = NEGO_MESSAGE_HEADER_LENGTH;
+ else if (type == INITIATOR_META_DATA || type == ACCEPTOR_META_DATA ||
+ type == CHALLENGE || type == AP_REQUEST)
+ header_len = EXCHANGE_MESSAGE_HEADER_LENGTH;
+ else if (type == VERIFY)
+ header_len = VERIFY_MESSAGE_HEADER_LENGTH;
+ else if (type == ALERT)
+ header_len = ALERT_MESSAGE_HEADER_LENGTH;
+ else
+ heim_assert(0, "Invalid NegoEx message type");
+
+ /* Signature */
+ CHECK(ret, krb5_store_uint64(ctx->negoex_transcript, MESSAGE_SIGNATURE));
+ /* MessageType */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, type));
+ /* SequenceNum */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, ctx->negoex_seqnum));
+ /* cbHeaderLength */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, header_len));
+ /* cbMessageLength */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, header_len + payload_len));
+ /* ConversationId */
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, ctx->negoex_conv_id, GUID_LENGTH));
+
+ _gss_negoex_log_message(0, type,
+ ctx->negoex_conv_id, ctx->negoex_seqnum,
+ header_len,
+ header_len + payload_len);
+
+ ctx->negoex_seqnum++;
+
+ *payload_start_out = header_len;
+ return GSS_S_COMPLETE;
+
+fail:
+ *minor = ret;
+ return GSS_S_FAILURE;
+}
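+
+/*
+ * For reference, the 40-byte common header written above is, in little-
+ * endian order: uint64 Signature ("NEGOEXTS"), uint32 MessageType, uint32
+ * SequenceNum, uint32 cbHeaderLength, uint32 cbMessageLength, and the
+ * 16-byte ConversationId.
+ */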
+
+OM_uint32
+_gss_negoex_add_nego_message(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ enum message_type type,
+ uint8_t random[32])
+{
+ OM_uint32 major;
+ krb5_error_code ret;
+ struct negoex_auth_mech *mech;
+ uint32_t payload_start;
+ uint16_t nschemes;
+
+ nschemes = 0;
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links)
+ nschemes++;
+
+ major = put_message_header(minor, ctx, type,
+ nschemes * GUID_LENGTH, &payload_start);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, random, 32));
+ /* ProtocolVersion */
+ CHECK(ret, krb5_store_uint64(ctx->negoex_transcript, 0));
+ /* AuthSchemes vector */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
+ CHECK(ret, krb5_store_uint16(ctx->negoex_transcript, nschemes));
+ /* Extensions vector */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
+ CHECK(ret, krb5_store_uint16(ctx->negoex_transcript, 0));
+ /* Four bytes of padding to reach a multiple of 8 bytes. */
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, "\0\0\0\0", 4));
+
+ /* Payload (auth schemes) */
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links) {
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, mech->scheme, GUID_LENGTH));
+ }
+
+ return GSS_S_COMPLETE;
+
+fail:
+ *minor = ret;
+ return GSS_S_FAILURE;
+}
+
+OM_uint32
+_gss_negoex_add_exchange_message(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ enum message_type type,
+ const auth_scheme scheme,
+ gss_buffer_t token)
+{
+ OM_uint32 major;
+ krb5_error_code ret;
+ uint32_t payload_start;
+
+ major = put_message_header(minor, ctx, type, token->length, &payload_start);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
+ /* Exchange byte vector */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, token->length));
+ /* Payload (token) */
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, token->value, token->length));
+
+ return GSS_S_COMPLETE;
+
+fail:
+ *minor = ret;
+ return GSS_S_FAILURE;
+}
+
+OM_uint32
+_gss_negoex_add_verify_message(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ const auth_scheme scheme,
+ uint32_t cksum_type,
+ const uint8_t *cksum,
+ uint32_t cksum_len)
+{
+ OM_uint32 major;
+ krb5_error_code ret;
+ uint32_t payload_start;
+
+ major = put_message_header(minor, ctx, VERIFY, cksum_len, &payload_start);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, CHECKSUM_HEADER_LENGTH));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, CHECKSUM_SCHEME_RFC3961));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, cksum_type));
+ /* ChecksumValue vector */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, cksum_len));
+ /* Four bytes of padding to reach a multiple of 8 bytes. */
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, "\0\0\0\0", 4));
+ /* Payload (checksum contents) */
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, cksum, cksum_len));
+
+ return GSS_S_COMPLETE;
+
+fail:
+ *minor = ret;
+ return GSS_S_FAILURE;
+}
+
+/*
+ * Add an ALERT_MESSAGE containing a single ALERT_TYPE_PULSE alert with the
+ * reason ALERT_VERIFY_NO_KEY.
+ */
+OM_uint32
+_gss_negoex_add_verify_no_key_alert(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ const auth_scheme scheme)
+{
+ OM_uint32 major;
+ krb5_error_code ret;
+ uint32_t payload_start;
+
+ major = put_message_header(minor, ctx,
+ ALERT, ALERT_LENGTH + ALERT_PULSE_LENGTH,
+ &payload_start);
+ if (major != GSS_S_COMPLETE)
+ return major;
+
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, scheme, GUID_LENGTH));
+ /* ErrorCode */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, 0));
+ /* Alerts vector */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, payload_start));
+ CHECK(ret, krb5_store_uint16(ctx->negoex_transcript, 1));
+ /* Six bytes of padding to reach a multiple of 8 bytes. */
+ CHECK(ret, krb5_store_bytes(ctx->negoex_transcript, "\0\0\0\0\0\0", 6));
+ /* Payload part 1: a single ALERT element */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, ALERT_TYPE_PULSE));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript,
+ payload_start + ALERT_LENGTH));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, ALERT_PULSE_LENGTH));
+ /* Payload part 2: ALERT_PULSE */
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, ALERT_PULSE_LENGTH));
+ CHECK(ret, krb5_store_uint32(ctx->negoex_transcript, ALERT_VERIFY_NO_KEY));
+
+ return GSS_S_COMPLETE;
+
+fail:
+ *minor = ret;
+ return GSS_S_FAILURE;
+}
+
+
+void
+_gss_negoex_release_auth_mech(krb5_context context,
+ struct negoex_auth_mech *mech)
+{
+ OM_uint32 tmpmin;
+
+ if (mech == NULL)
+ return;
+
+ gss_delete_sec_context(&tmpmin, &mech->mech_context, NULL);
+ gss_release_oid(&tmpmin, &mech->oid);
+ gss_release_buffer(&tmpmin, &mech->metadata);
+ if (mech->crypto)
+ krb5_crypto_destroy(context, mech->crypto);
+ if (mech->verify_crypto)
+ krb5_crypto_destroy(context, mech->verify_crypto);
+
+ free(mech);
+}
+
+void
+_gss_negoex_delete_auth_mech(gssspnego_ctx ctx,
+ struct negoex_auth_mech *mech)
+{
+ krb5_context context = _gss_mg_krb5_context();
+
+ HEIM_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
+ _gss_negoex_release_auth_mech(context, mech);
+}
+
+/* Remove all auth mech entries except for mech from ctx->negoex_mechs. */
+void
+_gss_negoex_select_auth_mech(gssspnego_ctx ctx,
+ struct negoex_auth_mech *mech)
+{
+ krb5_context context = _gss_mg_krb5_context();
+
+ heim_assert(mech != NULL, "Invalid null NegoEx mech");
+ HEIM_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
+ release_all_mechs(ctx, context);
+ HEIM_TAILQ_INSERT_HEAD(&ctx->negoex_mechs, mech, links);
+}
+
+OM_uint32
+_gss_negoex_add_auth_mech(OM_uint32 *minor,
+ gssspnego_ctx ctx,
+ gss_const_OID oid,
+ auth_scheme scheme)
+{
+ OM_uint32 major;
+ struct negoex_auth_mech *mech;
+
+ mech = calloc(1, sizeof(*mech));
+ if (mech == NULL) {
+ *minor = ENOMEM;
+ return GSS_S_FAILURE;
+ }
+
+ major = gss_duplicate_oid(minor, (gss_OID)oid, &mech->oid);
+ if (major != GSS_S_COMPLETE) {
+ free(mech);
+ return major;
+ }
+
+ memcpy(mech->scheme, scheme, GUID_LENGTH);
+
+ HEIM_TAILQ_INSERT_TAIL(&ctx->negoex_mechs, mech, links);
+
+ *minor = 0;
+ return GSS_S_COMPLETE;
+}
+
+struct negoex_auth_mech *
+_gss_negoex_locate_auth_scheme(gssspnego_ctx ctx,
+ const auth_scheme scheme)
+{
+ struct negoex_auth_mech *mech;
+
+ HEIM_TAILQ_FOREACH(mech, &ctx->negoex_mechs, links) {
+ if (GUID_EQ(mech->scheme, scheme))
+ return mech;
+ }
+
+ return NULL;
+}
+
+/*
+ * Prune ctx->negoex_mechs to the schemes present in schemes, and reorder the
+ * remaining entries to match the order of schemes.
+ */
+void
+_gss_negoex_common_auth_schemes(gssspnego_ctx ctx,
+ const uint8_t *schemes,
+ uint16_t nschemes)
+{
+ struct negoex_mech_list list;
+ struct negoex_auth_mech *mech;
+ uint16_t i;
+ krb5_context context = _gss_mg_krb5_context();
+
+ /* Construct a new list in the order of schemes. */
+ HEIM_TAILQ_INIT(&list);
+ for (i = 0; i < nschemes; i++) {
+ mech = _gss_negoex_locate_auth_scheme(ctx, schemes + i * GUID_LENGTH);
+ if (mech == NULL)
+ continue;
+ HEIM_TAILQ_REMOVE(&ctx->negoex_mechs, mech, links);
+ HEIM_TAILQ_INSERT_TAIL(&list, mech, links);
+ }
+
+ /* Release any leftover entries and replace the context list. */
+ release_all_mechs(ctx, context);
+ HEIM_TAILQ_CONCAT(&ctx->negoex_mechs, &list, links);
+}
+
+/*
+ * Prune ctx->negoex_mechs to the schemes present in schemes, but do not change
+ * their order.
+ */
+void
+_gss_negoex_restrict_auth_schemes(gssspnego_ctx ctx,
+ const uint8_t *schemes,
+ uint16_t nschemes)
+{
+ struct negoex_auth_mech *mech, *next;
+ uint16_t i;
+ int found;
+
+ HEIM_TAILQ_FOREACH_SAFE(mech, &ctx->negoex_mechs, links, next) {
+ found = FALSE;
+ for (i = 0; i < nschemes && !found; i++) {
+ if (GUID_EQ(mech->scheme, schemes + i * GUID_LENGTH))
+ found = TRUE;
+ }
+
+ if (!found)
+ _gss_negoex_delete_auth_mech(ctx, mech);
+ }
+}
+
+/*
+ * Return the current (most preferred) NegoEx mechanism entry.
+ */
+struct negoex_auth_mech *
+_gss_negoex_negotiated_mech(gssspnego_ctx ctx)
+{
+ return HEIM_TAILQ_FIRST(&ctx->negoex_mechs);
+}
+
+/*
+ * Return TRUE if the mechanism can be negotiated by both NegoEx and SPNEGO.
+ */
+
+int
+_gss_negoex_and_spnego_mech_p(gss_const_OID mech)
+{
+ OM_uint32 major, minor;
+ gss_OID_set attrs = GSS_C_NO_OID_SET;
+ int negoex_and_spnego = FALSE;
+
+ major = gss_inquire_attrs_for_mech(&minor, mech, &attrs, NULL);
+ if (major == GSS_S_COMPLETE) {
+ gss_test_oid_set_member(&minor, GSS_C_MA_NEGOEX_AND_SPNEGO,
+ attrs, &negoex_and_spnego);
+ gss_release_oid_set(&minor, &attrs);
+ }
+
+ return negoex_and_spnego;
+}
+
+int
+_gss_negoex_mech_p(gss_const_OID mech)
+{
+ OM_uint32 minor;
+ auth_scheme scheme;
+
+ return gssspi_query_mechanism_info(&minor, mech,
+ scheme) == GSS_S_COMPLETE;
+}
+
diff --git a/third_party/heimdal/lib/gssapi/spnego/spnego.asn1 b/third_party/heimdal/lib/gssapi/spnego/spnego.asn1
new file mode 100644
index 00000000000..bd69a0512aa
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/spnego.asn1
@@ -0,0 +1,66 @@
+-- $Id$
+
+SPNEGO DEFINITIONS ::=
+BEGIN
+
+MechType::= OBJECT IDENTIFIER
+
+MechTypeList ::= SEQUENCE OF MechType
+
+ContextFlags ::= BIT STRING {
+ delegFlag (0),
+ mutualFlag (1),
+ replayFlag (2),
+ sequenceFlag (3),
+ anonFlag (4),
+ confFlag (5),
+ integFlag (6)
+}
+
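+-- NegHints and NegTokenInit2 follow Microsoft's extended form of
+-- NegTokenInit (see [MS-SPNG]); plain NegTokenInit below matches RFC 4178.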
+NegHints ::= SEQUENCE {
+ hintName [0] GeneralString OPTIONAL,
+ hintAddress [1] OCTET STRING OPTIONAL
+}
+
+NegTokenInit2 ::= SEQUENCE {
+ mechTypes [0] MechTypeList,
+ reqFlags [1] ContextFlags OPTIONAL,
+ mechToken [2] OCTET STRING OPTIONAL,
+ negHints [3] NegHints OPTIONAL
+}
+
+NegTokenInit ::= SEQUENCE {
+ mechTypes [0] MechTypeList,
+ reqFlags [1] ContextFlags OPTIONAL,
+ mechToken [2] OCTET STRING OPTIONAL,
+ mechListMIC [3] OCTET STRING OPTIONAL,
+ ...
+}
+
+NegStateEnum ::= ENUMERATED {
+ accept-completed(0),
+ accept-incomplete(1),
+ reject(2),
+ request-mic(3)
+}
+
+-- NB: negState is not OPTIONAL in the new SPNEGO spec but
+-- Windows clients do not always send it
+NegTokenResp ::= SEQUENCE {
+ negState [0] NegStateEnum OPTIONAL,
+ supportedMech [1] MechType OPTIONAL,
+ responseToken [2] OCTET STRING OPTIONAL,
+ mechListMIC [3] OCTET STRING OPTIONAL,
+ ...
+}
+
+NegotiationToken ::= CHOICE {
+ negTokenInit[0] NegTokenInit,
+ negTokenResp[1] NegTokenResp
+}
+
+NegotiationToken2 ::= CHOICE {
+ negTokenInit[0] NegTokenInit2
+}
+
+END
diff --git a/third_party/heimdal/lib/gssapi/spnego/spnego.opt b/third_party/heimdal/lib/gssapi/spnego/spnego.opt
new file mode 100644
index 00000000000..cbf2f2341db
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/spnego.opt
@@ -0,0 +1 @@
+--sequence=MechTypeList
diff --git a/third_party/heimdal/lib/gssapi/spnego/spnego_locl.h b/third_party/heimdal/lib/gssapi/spnego/spnego_locl.h
new file mode 100644
index 00000000000..e3434f252a3
--- /dev/null
+++ b/third_party/heimdal/lib/gssapi/spnego/spnego_locl.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2004, PADL Software Pty Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of PADL Software nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $Id$ */
+
+#ifndef SPNEGO_LOCL_H
+#define SPNEGO_LOCL_H
+
+#include <config.h>
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#include <roken.h>
+
+#ifdef HAVE_PTHREAD_H
+#include <pthread.h>
+#endif
+
+#include <krb5.h>
+#include <gssapi.h>
+#include <gssapi_krb5.h>
+#include <gssapi_spnego.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <ctype.h>
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#include <heim_threads.h>
+#include <heimqueue.h>
+#include <asn1_err.h>
+
+#include <gssapi_mech.h>
+
+#include "spnego_asn1.h"
+#include "negoex_locl.h"
+#include "utils.h"
+#include <der.h>
+
+#include <heimbase.h>
+
+#define ALLOC(X, N) (X) = calloc((N), sizeof(*(X)))
+
+#define CHECK(ret, x) do { (ret) = (x); if (ret) goto fail; } while (0)
+
+struct gssspnego_ctx_desc;
+typedef struct gssspnego_ctx_desc *gssspnego_ctx;
+
+typedef OM_uint32
+(*gssspnego_initiator_state)(OM_uint32 * minor_status,
+ gss_const_cred_id_t cred,
+ gssspnego_ctx ctx,
+ gss_const_name_t name,
+ gss_const_OID mech_type,
+ OM_uint32 req_flags,
+ OM_uint32 time_req,
+ const gss_channel_bindings_t input_chan_bindings,
+ gss_const_buffer_t input_token,
+ gss_buffer_t output_token,
+ OM_uint32 * ret_flags,
+ OM_uint32 * time_rec);
+
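+/*
+ * The negoex_* members below hold NegoEx state: negoex_step counts the
+ * NegoEx legs processed so far, negoex_transcript accumulates every NegoEx
+ * message sent and received (the VERIFY checksums cover it), and
+ * negoex_seqnum/negoex_conv_id track the message sequence number and
+ * conversation ID.
+ */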
+struct gssspnego_ctx_desc {
+ gss_buffer_desc NegTokenInit_mech_types;
+ gss_OID preferred_mech_type;
+ gss_OID selected_mech_type;
+ gss_OID negotiated_mech_type;
+ gss_ctx_id_t negotiated_ctx_id;
+ OM_uint32 mech_flags;
+ OM_uint32 mech_time_rec;
+ gss_name_t mech_src_name;
+ struct spnego_flags {
+ unsigned int open : 1;
+ unsigned int local : 1;
+ unsigned int require_mic : 1;
+ unsigned int peer_require_mic : 1;
+ unsigned int sent_mic : 1;
+ unsigned int verified_mic : 1;
+ unsigned int safe_omit : 1;
+ unsigned int maybe_open : 1;
+ unsigned int seen_supported_mech : 1;
+ } flags;
+ HEIMDAL_MUTEX ctx_id_mutex;
+
+ gss_name_t target_name;
+ gssspnego_initiator_state initiator_state;
+
+ uint8_t negoex_step;
+ krb5_storage *negoex_transcript;
+ uint32_t negoex_seqnum;
+ conversation_id negoex_conv_id;
+ HEIM_TAILQ_HEAD(negoex_mech_list, negoex_auth_mech) negoex_mechs;
+};
+
+extern gss_OID_desc _gss_spnego_mskrb_mechanism_oid_desc;
+
+struct gssspnego_optimistic_ctx {
+ gssspnego_ctx spnegoctx;
+ OM_uint32 req_flags;
+ gss_name_t target_name;
+ OM_uint32 time_req;
+ gss_channel_bindings_t input_chan_bindings;
+ /* out */
+ gss_OID preferred_mech_type;
+ gss_OID negotiated_mech_type;
+ gss_buffer_desc optimistic_token;
+ OM_uint32 optimistic_flags, optimistic_time_rec;
+ gss_ctx_id_t gssctx;
+ int complete;
+ auth_scheme scheme;
+};
+
+#include "spnego-private.h"
+
+static inline int
+gssspnego_ctx_complete_p(gssspnego_ctx ctx)
+{
+ return ctx->flags.open &&
+ (ctx->flags.safe_omit || (ctx->flags.sent_mic && ctx->flags.verified_mic));
+}
+
+#endif /* SPNEGO_LOCL_H */