#include <stdint.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode.h>
#include <sys/ubc.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <kern/host.h>
#include <libkern/libkern.h>
#include <mach/task.h>
#include <mach/task_special_ports.h>
#include <mach/host_priv.h>
#include <mach/thread_act.h>
#include <mach/mig_errors.h>
#include <mach/vm_map.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <gssd/gssd_mach.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
/* Max retries of the gssd upcall if the MIG server dies mid-call */
#define NFS_GSS_MACH_MAX_RETRIES 3

#if NFSSERVER
/* Server-side context table: hash on the 32-bit context handle */
u_long nfs_gss_svc_ctx_hash;
struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
lck_mtx_t *nfs_gss_svc_ctx_mutex;		/* guards the hash table and timer state */
lck_grp_t *nfs_gss_svc_grp;
#endif
#if NFSCLIENT
lck_grp_t *nfs_gss_clnt_grp;			/* lock group for per-context mutexes */
#endif

/*
 * Fixed Kerberos 5 token fragments (RFC 1964 framing):
 * token header, the krb5 mechanism OID, and the MIC/WRAP
 * algorithm indicator bytes.  iv0 is an all-zero DES IV /
 * zero padding source.
 */
static u_char krb5_tokhead[] = { 0x60, 0x23 };
static u_char krb5_mech[] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
static u_char krb5_mic[] = { 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff };
static u_char krb5_wrap[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
static u_char iv0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

#define KRB5_SZ_TOKHEAD sizeof(krb5_tokhead)
#define KRB5_SZ_MECH sizeof(krb5_mech)
#define KRB5_SZ_ALG sizeof(krb5_mic) // 8 - same as krb5_wrap
#define KRB5_SZ_SEQ 8
#define KRB5_SZ_CKSUM 8
#define KRB5_SZ_EXTRA 3 // a wrap token may be longer by up to this many octets
#define KRB5_SZ_TOKEN (KRB5_SZ_TOKHEAD + KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + KRB5_SZ_CKSUM)
#define KRB5_SZ_TOKMAX (KRB5_SZ_TOKEN + KRB5_SZ_EXTRA)

#if NFSCLIENT
static int	nfs_gss_clnt_ctx_find(struct nfsreq *);
static int	nfs_gss_clnt_ctx_failover(struct nfsreq *);
static int	nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static char	*nfs_gss_clnt_svcname(struct nfsmount *);
static int	nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *);
static void	nfs_gss_clnt_ctx_remove(struct nfsmount *, struct nfs_gss_clnt_ctx *);
static int	nfs_gss_clnt_ctx_delay(struct nfsreq *, int *);
#endif
#if NFSSERVER
static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
static void	nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
static void	nfs_gss_svc_ctx_timer(void *, void *);
static int	nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
static int	nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
#endif

/* Shared helpers: Mach port plumbing, token en/decode, DER lengths,
 * mbuf-chain length/append/checksum/encrypt utilities, and the DES
 * CBC primitives they sit on. */
static void	task_release_special_port(mach_port_t);
static mach_port_t task_copy_special_port(mach_port_t);
static void	nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *);
static int	nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
static int	nfs_gss_token_get(des_key_schedule, u_char *, u_char *, int, uint32_t *, u_char *);
static int	nfs_gss_token_put(des_key_schedule, u_char *, u_char *, int, int, u_char *);
static int	nfs_gss_der_length_size(int);
static void	nfs_gss_der_length_put(u_char **, int);
static int	nfs_gss_der_length_get(u_char **);
static int	nfs_gss_mchain_length(mbuf_t);
static int	nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
static void	nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
static void	nfs_gss_cksum_mchain(des_key_schedule, mbuf_t, u_char *, int, int, u_char *);
static void	nfs_gss_cksum_chain(des_key_schedule, struct nfsm_chain *, u_char *, int, int, u_char *);
static void	nfs_gss_cksum_rep(des_key_schedule, uint32_t, u_char *);
static void	nfs_gss_encrypt_mchain(u_char *, mbuf_t, int, int, int);
static void	nfs_gss_encrypt_chain(u_char *, struct nfsm_chain *, int, int, int);
static DES_LONG des_cbc_cksum(des_cblock *, des_cblock *, long, des_key_schedule, des_cblock *);
static void des_cbc_encrypt(des_cblock *, des_cblock *, long, des_key_schedule,
	des_cblock *, des_cblock *, int);

#if NFSSERVER
/* Reaper callout for expired server contexts */
thread_call_t nfs_gss_svc_ctx_timer_call;
int nfs_gss_timer_on = 0;			/* nonzero while the reaper is scheduled */
uint32_t nfs_gss_ctx_count = 0;			/* current number of server contexts */
const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
#endif
/*
 * One-time initialization of the RPCSEC_GSS module: allocate the
 * client/server lock groups and, on the server side, the context
 * hash table, its mutex, and the expiration timer callout.
 */
void
nfs_gss_init(void)
{
#if NFSCLIENT
	nfs_gss_clnt_grp = lck_grp_alloc_init("rpcsec_gss_clnt", LCK_GRP_ATTR_NULL);
#endif
#if NFSSERVER
	nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL);
	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);
	nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
#endif
}
#if NFSCLIENT
/*
 * Find (or create) a GSS context for the request's credential on its
 * mount.  Lookup order:
 *  1. a valid context matching the request's uid;
 *  2. for root (uid 0), any valid context on the mount;
 *  3. otherwise allocate a fresh context and run the init handshake.
 * Async requests (no r_thread) cannot do the gssd upcall, so they
 * either fall back to an AUTH_SYS-style context (when the mount did
 * not explicitly demand GSS) or fail with EAUTH.
 * On success the context is referenced and attached to the request.
 */
static int
nfs_gss_clnt_ctx_find(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp;
	uid_t uid = kauth_cred_getuid(req->r_cred);
	int error = 0;
	int retrycnt = 0;
retry:
	lck_mtx_lock(&nmp->nm_lock);
	/* First choice: a valid context for this uid */
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		if (cp->gss_clnt_uid == uid) {
			if (cp->gss_clnt_flags & GSS_CTX_INVAL)
				continue;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_gss_clnt_ctx_ref(req, cp);
			return (0);
		}
	}
	/* Root may borrow any valid context on the mount */
	if (uid == 0) {
		TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
			if (!(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
				lck_mtx_unlock(&nmp->nm_lock);
				nfs_gss_clnt_ctx_ref(req, cp);
				return (0);
			}
		}
	}
	/* No thread context: can't upcall to gssd from here */
	if (req->r_thread == NULL) {
		if ((nmp->nm_flag & NFSMNT_SECGIVEN) == 0) {
			error = nfs_gss_clnt_ctx_failover(req);
		} else {
			printf("nfs_gss_clnt_ctx_find: no context for async\n");
			error = EAUTH;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		return (error);
	}
	/*
	 * NOTE(review): M_WAITOK allocation performed while holding
	 * nm_lock — this can sleep under the mutex; confirm that is
	 * acceptable for this lock.
	 */
	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
	if (cp == NULL) {
		lck_mtx_unlock(&nmp->nm_lock);
		return (ENOMEM);
	}
	cp->gss_clnt_uid = uid;
	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
	/* Mark this thread as the one performing the handshake; other
	 * threads wanting this context will sleep in cred_put */
	cp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);
	error = nfs_gss_clnt_ctx_init(req, cp);
	if (error)
		nfs_gss_clnt_ctx_unref(req);
	/* ENEEDAUTH: transient; back off and retry the whole lookup */
	if (error == ENEEDAUTH) {
		error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
		if (!error)
			goto retry;
	}
	/* Handshake failed: fall back to AUTH_SYS if the mount allows it */
	if (error && (nmp->nm_flag & NFSMNT_SECGIVEN) == 0) {
		lck_mtx_lock(&nmp->nm_lock);
		error = nfs_gss_clnt_ctx_failover(req);
		lck_mtx_unlock(&nmp->nm_lock);
	}
	return (error);
}
/*
 * Create a fallback context that uses AUTH_SYS-style security
 * (RPCSEC_GSS_SVC_SYS) instead of Kerberos, for mounts that did not
 * explicitly require GSS.  The creation time is recorded so cred_put
 * can invalidate it after GSS_CLNT_SYS_VALID seconds and retry Kerberos.
 *
 * Caller must hold nmp->nm_lock (both call sites in
 * nfs_gss_clnt_ctx_find do) — the TAILQ insert below is not
 * otherwise synchronized.
 */
static int
nfs_gss_clnt_ctx_failover(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp;
	uid_t uid = kauth_cred_getuid(req->r_cred);
	struct timeval now;

	MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
	if (cp == NULL)
		return (ENOMEM);
	cp->gss_clnt_service = RPCSEC_GSS_SVC_SYS;
	cp->gss_clnt_uid = uid;
	cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
	microuptime(&now);
	cp->gss_clnt_ctime = now.tv_sec;	/* time of creation */
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, cp, gss_clnt_entries);
	return (0);
}
/*
 * Build the RPCSEC_GSS credential and verifier for an outgoing request
 * and wrap the call arguments (mbuf chain "args") according to the
 * context's service level:
 *   SVC_NONE      - arguments appended in the clear;
 *   SVC_INTEGRITY - arguments prefixed with length+seqnum and followed
 *                   by a MIC token over them;
 *   SVC_PRIVACY   - confounder+seqnum+args padded, wrapped in a krb5
 *                   wrap token, and DES-encrypted in place.
 * Records r_gss_argoff/r_gss_arglen so args_restore can undo the
 * wrapping on retransmit.  Returns ENEEDAUTH for a SYS fallback
 * context that is still valid (caller sends AUTH_SYS instead).
 */
int
nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp;
	uint32_t seqnum = 0;
	int error = 0;
	int slpflag = 0;
	int start, len, offset = 0;
	int pad, toklen;
	struct nfsm_chain nmc_tmp;
	struct gss_seq *gsp;
	u_char tokbuf[KRB5_SZ_TOKMAX];
	u_char cksum[8];
	struct timeval now;
retry:
	if (req->r_gss_ctx == NULL) {
		error = nfs_gss_clnt_ctx_find(req);
		if (error)
			return (error);
	}
	cp = req->r_gss_ctx;

	/*
	 * AUTH_SYS fallback context: if it has outlived its validity
	 * window, invalidate it and retry (which will attempt a real
	 * GSS context); otherwise tell the caller to use AUTH_SYS.
	 */
	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_SYS) {
		lck_mtx_lock(cp->gss_clnt_mtx);
		microuptime(&now);
		if (now.tv_sec > cp->gss_clnt_ctime + GSS_CLNT_SYS_VALID) {
			cp->gss_clnt_flags |= GSS_CTX_INVAL;
			lck_mtx_unlock(cp->gss_clnt_mtx);
			nfs_gss_clnt_ctx_unref(req);
			goto retry;
		}
		lck_mtx_unlock(cp->gss_clnt_mtx);
		return (ENEEDAUTH);
	}

	/*
	 * Another thread is mid-handshake on this context: wait for it
	 * to finish (PDROP releases the mutex in msleep), then retry.
	 */
	lck_mtx_lock(cp->gss_clnt_mtx);
	if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
		cp->gss_clnt_flags |= GSS_NEEDCTX;
		slpflag = (PZERO-1) | PDROP | (((nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0);
		msleep(cp, cp->gss_clnt_mtx, slpflag, "ctxwait", NULL);
		if ((error = nfs_sigintr(nmp, req, req->r_thread, 0)))
			return (error);
		nfs_gss_clnt_ctx_unref(req);
		goto retry;
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);

	if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
		/*
		 * Sequence-window flow control: if the oldest slot in the
		 * window is still outstanding, sleep until rpcdone clears it.
		 */
		lck_mtx_lock(cp->gss_clnt_mtx);
		while (win_getbit(cp->gss_clnt_seqbits,
			((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
			cp->gss_clnt_flags |= GSS_NEEDSEQ;
			slpflag = (PZERO-1) | (((nmp->nm_flag & NFSMNT_INT) && req->r_thread) ? PCATCH : 0);
			msleep(cp, cp->gss_clnt_mtx, slpflag, "seqwin", NULL);
			if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
				lck_mtx_unlock(cp->gss_clnt_mtx);
				return (error);
			}
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				lck_mtx_unlock(cp->gss_clnt_mtx);
				nfs_gss_clnt_ctx_unref(req);
				goto retry;
			}
		}
		/* Claim the next sequence number and mark it in-flight */
		seqnum = ++cp->gss_clnt_seqnum;
		win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
		lck_mtx_unlock(cp->gss_clnt_mtx);

		/* Remember the seqnum per-request so the reply verifier can
		 * be matched even after retransmits with new seqnums */
		MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO);
		if (gsp == NULL)
			return (ENOMEM);
		gsp->gss_seqnum = seqnum;
		SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
	}

	/* RPCSEC_GSS credential: version, proc, seqnum, service, handle */
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
	nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
	nfsm_chain_add_32(error, nmc, seqnum);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
	nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);

	/* During context setup there is no session key yet: NULL verifier,
	 * arguments (the init token) appended unwrapped */
	if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
		cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
		nfsm_chain_add_32(error, nmc, 0);
		nfsm_chain_build_done(error, nmc);
		if (!error)
			nfs_gss_append_chain(nmc, args);
		return (error);
	}

	/* Verifier: MIC over the RPC header (skip the 4-byte record mark
	 * on stream sockets) */
	offset = nmp->nm_sotype == SOCK_STREAM ? NFSX_UNSIGNED : 0;
	nfsm_chain_build_done(error, nmc);
	nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_mic, offset, 0, cksum);
	toklen = nfs_gss_token_put(cp->gss_clnt_sched, krb5_mic, tokbuf, 1, 0, cksum);
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
	nfsm_chain_add_32(error, nmc, toklen);
	nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
	nfsm_chain_build_done(error, nmc);
	if (error)
		return (error);

	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		nfs_gss_append_chain(nmc, args);
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/* length word covers seqnum + args; MIC token follows them */
		len = nfs_gss_mchain_length(args);
		req->r_gss_arglen = len;
		len += NFSX_UNSIGNED;
		nfsm_chain_add_32(error, nmc, len);
		start = nfsm_chain_offset(nmc);
		nfsm_chain_add_32(error, nmc, seqnum);
		req->r_gss_argoff = nfsm_chain_offset(nmc);
		nfsm_chain_build_done(error, nmc);
		if (error)
			return (error);
		nfs_gss_append_chain(nmc, args);
		nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_mic, start, len, cksum);
		toklen = nfs_gss_token_put(cp->gss_clnt_sched, krb5_mic, tokbuf, 1, 0, cksum);
		nfsm_chain_finish_mbuf(error, nmc);
		nfsm_chain_add_32(error, nmc, toklen);
		nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
		nfsm_chain_build_done(error, nmc);
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/* Build the plaintext separately: 8-byte random confounder,
		 * seqnum, then the arguments */
		nfsm_chain_build_alloc_init(error, &nmc_tmp, 3 * NFSX_UNSIGNED);
		nfsm_chain_add_32(error, &nmc_tmp, random());
		nfsm_chain_add_32(error, &nmc_tmp, random());
		nfsm_chain_add_32(error, &nmc_tmp, seqnum);
		nfsm_chain_build_done(error, &nmc_tmp);
		if (error)
			return (error);
		nfs_gss_append_chain(&nmc_tmp, args);
		len = nfs_gss_mchain_length(args);
		len += 3 * NFSX_UNSIGNED;
		req->r_gss_arglen = len;
		/* Pad plaintext to a multiple of 8 for DES CBC; the pad
		 * bytes each hold the pad length (PKCS-style) */
		nfsm_chain_finish_mbuf(error, &nmc_tmp);
		if (len % 8 > 0) {
			nfsm_chain_add_32(error, &nmc_tmp, 0x04040404);
			len += NFSX_UNSIGNED;
		} else {
			nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
			nfsm_chain_add_32(error, &nmc_tmp, 0x08080808);
			len += 2 * NFSX_UNSIGNED;
		}
		nfsm_chain_build_done(error, &nmc_tmp);
		/* Checksum the plaintext, emit the wrap token, then append
		 * and encrypt the plaintext in place */
		nfs_gss_cksum_chain(cp->gss_clnt_sched, &nmc_tmp, krb5_wrap, 0, len, cksum);
		toklen = nfs_gss_token_put(cp->gss_clnt_sched, krb5_wrap, tokbuf, 1, len, cksum);
		nfsm_chain_add_32(error, nmc, toklen + len);
		nfsm_chain_add_opaque_nopad(error, nmc, tokbuf, toklen);
		req->r_gss_argoff = nfsm_chain_offset(nmc);
		nfsm_chain_build_done(error, nmc);
		if (error)
			return (error);
		nfs_gss_append_chain(nmc, nmc_tmp.nmc_mhead);
		nfs_gss_encrypt_chain(cp->gss_clnt_skey, &nmc_tmp, 0, len, DES_ENCRYPT);
		/* XDR-pad the opaque (token + ciphertext) with zeros */
		pad = nfsm_pad(toklen + len);
		if (pad > 0) {
			nfsm_chain_add_opaque_nopad(error, nmc, iv0, pad);
			nfsm_chain_build_done(error, nmc);
		}
		break;
	}
	return (error);
}
/*
 * Parse and check the verifier in an RPC reply, then unwrap the
 * results according to the context's service level.
 *
 * The reply verifier is a MIC over the request's sequence number; it
 * is matched against every seqnum recorded for this request (the
 * server may reply to any retransmission).  For SVC_INTEGRITY the
 * result checksum is verified; for SVC_PRIVACY the results are
 * decrypted in place and the checksum verified.
 * Returns 0 on success with *accepted_statusp set; EAUTH/EBADRPC on
 * verification failures.
 */
int
nfs_gss_clnt_verf_get(
	struct nfsreq *req,
	struct nfsm_chain *nmc,
	uint32_t verftype,
	uint32_t verflen,
	uint32_t *accepted_statusp)
{
	u_char tokbuf[KRB5_SZ_TOKMAX];
	u_char cksum1[8], cksum2[8];
	uint32_t seqnum = 0;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain nmc_tmp;
	struct gss_seq *gsp;
	uint32_t reslen, start, cksumlen, toklen;
	int error = 0;

	reslen = cksumlen = 0;
	*accepted_statusp = 0;

	if (cp == NULL)
		return (EAUTH);
	/*
	 * A non-GSS verifier is only acceptable from the AUTH_SYS
	 * fallback context (or while the context isn't complete).
	 */
	if (verftype != RPCSEC_GSS) {
		if (verftype != RPCAUTH_NULL)
			return (EAUTH);
		if (cp->gss_clnt_flags & GSS_CTX_COMPLETE &&
			cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS)
			return (EAUTH);
		if (verflen > 0)
			nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
		nfsm_chain_get_32(error, nmc, *accepted_statusp);
		return (error);
	}
	if (verflen != KRB5_SZ_TOKEN)
		return (EAUTH);
	/*
	 * During the handshake we have no session key yet: stash the raw
	 * verifier so ctx_init can check it once the key arrives.
	 */
	if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
		MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
		if (cp->gss_clnt_verf == NULL)
			return (ENOMEM);
		nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
		nfsm_chain_get_32(error, nmc, *accepted_statusp);
		return (error);
	}
	/* Extract the checksum from the verifier's MIC token */
	nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
	if (error)
		goto nfsmout;
	error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_mic, tokbuf, 0, NULL, cksum1);
	if (error)
		goto nfsmout;
	/* Match it against a MIC of each seqnum we sent for this request */
	SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
		nfs_gss_cksum_rep(cp->gss_clnt_sched, gsp->gss_seqnum, cksum2);
		if (bcmp(cksum1, cksum2, 8) == 0)
			break;
	}
	if (gsp == NULL)
		return (EAUTH);

	nfsm_chain_get_32(error, nmc, *accepted_statusp);
	if (*accepted_statusp != RPC_SUCCESS)
		return (0);

	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		/* nothing wrapped — results follow in the clear */
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/* length word, then seqnum + results, then a MIC token */
		nfsm_chain_get_32(error, nmc, reslen);
		if (reslen > NFS_MAXPACKET) {
			error = EBADRPC;
			goto nfsmout;
		}
		start = nfsm_chain_offset(nmc);
		nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_mic, start, reslen, cksum1);
		nfsm_chain_get_32(error, nmc, seqnum);
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum)
				break;
		}
		if (gsp == NULL) {
			error = EBADRPC;
			goto nfsmout;
		}
		/* Peek ahead (on a copy of the chain) at the trailing MIC
		 * token without consuming the results themselves */
		nmc_tmp = *nmc;
		reslen -= NFSX_UNSIGNED;	/* already consumed the seqnum */
		nfsm_chain_adv(error, &nmc_tmp, reslen);
		nfsm_chain_get_32(error, &nmc_tmp, cksumlen);
		if (cksumlen != KRB5_SZ_TOKEN) {
			error = EBADRPC;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
		if (error)
			goto nfsmout;
		error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_mic, tokbuf, 0,
			NULL, cksum2);
		if (error)
			goto nfsmout;
		if (bcmp(cksum1, cksum2, 8) != 0) {
			error = EBADRPC;
			goto nfsmout;
		}
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/* length word, wrap token, then encrypted results */
		nfsm_chain_get_32(error, nmc, reslen);
		if (reslen > NFS_MAXPACKET) {
			error = EBADRPC;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX, tokbuf);
		if (error)
			goto nfsmout;
		error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_wrap, tokbuf, 0,
			&toklen, cksum1);
		if (error)
			goto nfsmout;
		/* We read TOKMAX bytes but the token may be shorter: back up
		 * over the overshoot */
		nfsm_chain_reverse(nmc, nfsm_pad(toklen));
		/* Decrypt the remainder in place and verify its checksum */
		reslen -= toklen;
		start = nfsm_chain_offset(nmc);
		nfs_gss_encrypt_chain(cp->gss_clnt_skey, nmc, start, reslen, DES_DECRYPT);
		nfs_gss_cksum_chain(cp->gss_clnt_sched, nmc, krb5_wrap, start, reslen, cksum2);
		if (bcmp(cksum1, cksum2, 8) != 0) {
			error = EBADRPC;
			goto nfsmout;
		}
		/* Skip the 8-byte confounder, then match the seqnum */
		nfsm_chain_adv(error, nmc, 8);
		nfsm_chain_get_32(error, nmc, seqnum);
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum)
				break;
		}
		if (gsp == NULL) {
			error = EBADRPC;
			goto nfsmout;
		}
		break;
	}
nfsmout:
	return (error);
}
/*
 * Undo the GSS wrapping of a request's arguments so the request can
 * be rebuilt for retransmission (with a fresh seqnum/MIC): for
 * integrity, drop the trailing MIC; for privacy, decrypt the
 * arguments in place (including the pad added at build time) and
 * drop the trailing mbufs.  Uses r_gss_argoff/r_gss_arglen recorded
 * by nfs_gss_clnt_cred_put.
 */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain mchain, *nmc = &mchain;
	int len, error = 0;

	if (cp == NULL)
		return (EAUTH);
	if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0)
		return (ENEEDAUTH);

	/* Position at the start of the wrapped arguments */
	nfsm_chain_dissect_init(error, nmc, req->r_mhead);
	nfsm_chain_adv(error, nmc, req->r_gss_argoff);
	if (error)
		return (error);

	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		/* nothing was added */
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/* Chop off the MIC token that follows the arguments */
		nfsm_chain_adv(error, nmc, req->r_gss_arglen);
		if (error)
			return (error);
		mbuf_freem(mbuf_next(nmc->nmc_mcur));
		error = mbuf_setnext(nmc->nmc_mcur, NULL);
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/* Decrypt args + pad (4 bytes if len%8 != 0, else 8 — must
		 * mirror the padding logic in cred_put), then chop the tail */
		len = req->r_gss_arglen;
		len += len % 8 > 0 ? 4 : 8;
		nfs_gss_encrypt_chain(cp->gss_clnt_skey, nmc,
			req->r_gss_argoff, len, DES_DECRYPT);
		nfsm_chain_adv(error, nmc, req->r_gss_arglen);
		if (error)
			return (error);
		mbuf_freem(mbuf_next(nmc->nmc_mcur));
		error = mbuf_setnext(nmc->nmc_mcur, NULL);
		break;
	}
	return (error);
}
/*
 * Run the RPCSEC_GSS context-establishment handshake for a new
 * context: loop alternating gssd upcalls (to produce/consume tokens)
 * with NULL-proc RPCs to the server until both sides report
 * GSS_S_COMPLETE.  Then set up the DES key schedule, verify the
 * server's context-setup verifier (a MIC of the sequence window),
 * and initialize the sequence window state.
 * On exit, wakes any threads that were waiting for the handshake.
 * Returns 0, EAUTH on any security failure, or an RPC error.
 */
static int
nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	int client_complete = 0;
	int server_complete = 0;
	u_char cksum1[8], cksum2[8];
	int error = 0;
	struct timeval now;

	/* Service principal for the upcall, e.g. "nfs/<server>" */
	cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp);
	if (cp->gss_clnt_svcname == NULL) {
		error = EAUTH;
		goto nfsmout;
	}
	cp->gss_clnt_proc = RPCSEC_GSS_INIT;
	cp->gss_clnt_service =
		nmp->nm_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE :
		nmp->nm_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
		nmp->nm_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;

	/* Token exchange loop: gssd then server, until both complete */
	for (;;) {
		error = nfs_gss_clnt_gssd_upcall(req, cp);
		if (error)
			goto nfsmout;
		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			client_complete = 1;
			if (server_complete)
				break;
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			error = EAUTH;
			goto nfsmout;
		}
		error = nfs_gss_clnt_ctx_callserver(req, cp);
		if (error)
			goto nfsmout;
		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			server_complete = 1;
			if (client_complete)
				break;
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			error = EAUTH;
			goto nfsmout;
		}
		cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
	}
	cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
	cp->gss_clnt_proc = RPCSEC_GSS_DATA;
	microuptime(&now);
	cp->gss_clnt_ctime = now.tv_sec;

	/* Expand the session key into a DES key schedule */
	error = des_key_sched((des_cblock *) cp->gss_clnt_skey, cp->gss_clnt_sched);
	if (error) {
		error = EAUTH;
		goto nfsmout;
	}
	/*
	 * The server's setup verifier (saved raw by verf_get before the
	 * key existed) must be a MIC of the sequence window value.
	 */
	nfs_gss_cksum_rep(cp->gss_clnt_sched, cp->gss_clnt_seqwin, cksum1);
	error = nfs_gss_token_get(cp->gss_clnt_sched, krb5_mic, cp->gss_clnt_verf, 0,
		NULL, cksum2);
	FREE(cp->gss_clnt_verf, M_TEMP);
	cp->gss_clnt_verf = NULL;
	if (error || bcmp(cksum1, cksum2, 8) != 0) {
		error = EAUTH;
		goto nfsmout;
	}
	/* Start seqnums past one full window; allocate the in-flight bitmap */
	cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;
	MALLOC(cp->gss_clnt_seqbits, uint32_t *,
		nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
	if (cp->gss_clnt_seqbits == NULL)
		error = EAUTH;
nfsmout:
	if (error)
		cp->gss_clnt_flags |= GSS_CTX_INVAL;
	/* Handshake over (either way): release waiters */
	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);
	return (error);
}
/*
 * Send one leg of the context-setup handshake to the server: a NULL
 * procedure RPC whose arguments carry the current gssd output token.
 * Parses the reply into the context: handle, major/minor status,
 * sequence window, and (optionally) a continuation token for the
 * next gssd upcall.  Logs server-side GSS failures via gssd.
 */
static int
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status;
	u_int64_t xid;
	int sz;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
	nfsm_chain_build_alloc_init(error, &nmreq, sz);
	nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
	nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
	nfsm_chain_build_done(error, &nmreq);
	if (error)
		goto nfsmout;

	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC_NULL,
		req->r_thread, req->r_cred, 0, &nmrep, &xid, &status);
	/* The outgoing token has been sent; free it either way */
	if (cp->gss_clnt_token != NULL) {
		FREE(cp->gss_clnt_token, M_TEMP);
		cp->gss_clnt_token = NULL;
	}
	if (!error)
		error = status;
	if (error)
		goto nfsmout;

	/* Reply: context handle, statuses, seq window, reply token */
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle != NULL)
		FREE(cp->gss_clnt_handle, M_TEMP);
	if (cp->gss_clnt_handle_len > 0) {
		/*
		 * NOTE(review): handle_len and tokenlen below come from the
		 * server and are not bounded before MALLOC — confirm an
		 * upper sanity limit isn't needed here.
		 */
		MALLOC(cp->gss_clnt_handle, u_char *, cp->gss_clnt_handle_len, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_handle == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
	}
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
	if (error)
		goto nfsmout;
	if (cp->gss_clnt_tokenlen > 0) {
		MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
	}
	/* Ask gssd to log a server-side failure for the user */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
		cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		char who[] = "server";
		(void) mach_gss_log_error(
			cp->gss_clnt_mport,
			vfs_statfs(nmp->nm_mountp)->f_mntfromname,
			cp->gss_clnt_uid,
			who,
			cp->gss_clnt_major,
			cp->gss_clnt_minor);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
/*
 * Build the Kerberos service principal name ("nfs/<server>") passed
 * to gssd when establishing a context.  The server name is taken
 * from the mount's f_mntfromname and truncated at the first ':'
 * (the "server:/export" form).  Returns an M_TEMP buffer the caller
 * must free, or NULL if allocation fails.
 */
static char *
nfs_gss_clnt_svcname(struct nfsmount *nmp)
{
	char *principal, *colon;
	char *server = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
	int buflen;

	buflen = strlen(server) + 5;	/* "nfs/" + server + '\0' */
	MALLOC(principal, char *, buflen, M_TEMP, M_NOWAIT);
	if (principal == NULL)
		return (NULL);
	snprintf(principal, buflen, "nfs/%s", server);

	/* Keep only the host part of "server:/export" */
	colon = strchr(principal, ':');
	if (colon != NULL)
		*colon = '\0';
	return (principal);
}
/*
 * Make a Mach upcall to the user-space gssd to advance context
 * establishment: pass the current input token (if any) and receive
 * the session key and/or the next output token, plus GSS major and
 * minor statuses.  The gssd port is fetched from the task's special
 * port on first use.  Retries a few times if the MIG server died
 * before gssd produced a credential handle.
 * Returns 0 on a successful upcall (GSS status is in gss_clnt_major),
 * EAUTH/ENOMEM otherwise.
 */
static int
nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	kern_return_t kr;
	byte_buffer okey = NULL;
	uint32_t skeylen = 0;
	int retry_cnt = 0;
	vm_map_copy_t itoken = NULL;
	byte_buffer otoken = NULL;
	int error = 0;
	char uprinc[1];

	/* Empty principal: let gssd pick the user's default credential */
	uprinc[0] = '\0';
	if (cp->gss_clnt_mport == NULL) {
		kr = task_get_gssd_port(get_threadtask(req->r_thread), &cp->gss_clnt_mport);
		if (kr != KERN_SUCCESS) {
			printf("nfs_gss_clnt_gssd_upcall: can't get gssd port, status %d\n", kr);
			return (EAUTH);
		}
		if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
			printf("nfs_gss_clnt_gssd_upcall: gssd port not valid\n");
			cp->gss_clnt_mport = NULL;
			return (EAUTH);
		}
	}
	/* Hand the input token to Mach as out-of-line memory */
	if (cp->gss_clnt_tokenlen > 0)
		nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
retry:
	kr = mach_gss_init_sec_context(
		cp->gss_clnt_mport,
		KRB5_MECH,
		(byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
		cp->gss_clnt_uid,
		uprinc,
		cp->gss_clnt_svcname,
		GSSD_MUTUAL_FLAG | GSSD_NO_UI,
		&cp->gss_clnt_gssd_verf,
		&cp->gss_clnt_context,
		&cp->gss_clnt_cred_handle,
		&okey, (mach_msg_type_number_t *) &skeylen,
		&otoken, (mach_msg_type_number_t *) &cp->gss_clnt_tokenlen,
		&cp->gss_clnt_major,
		&cp->gss_clnt_minor);
	if (kr != 0) {
		printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x\n", kr);
		/* gssd restarted before we got a credential: safe to retry */
		if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES)
			goto retry;
		task_release_special_port(cp->gss_clnt_mport);
		cp->gss_clnt_mport = NULL;
		return (EAUTH);
	}
	/* Ask gssd to log a client-side failure for the user */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
		cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		char who[] = "client";
		(void) mach_gss_log_error(
			cp->gss_clnt_mport,
			vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname,
			cp->gss_clnt_uid,
			who,
			cp->gss_clnt_major,
			cp->gss_clnt_minor);
	}
	/* Copy the session key out of the out-of-line reply buffer */
	if (skeylen > 0) {
		if (skeylen != SKEYLEN) {
			printf("nfs_gss_clnt_gssd_upcall: bad key length (%d)\n", skeylen);
			/* NOTE(review): otoken/okey out-of-line buffers appear
			 * to be leaked on these early EAUTH returns — confirm */
			return (EAUTH);
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_clnt_skey);
		if (error)
			return (EAUTH);
	}
	/* Copy the output token for the next leg of the handshake */
	if (cp->gss_clnt_tokenlen > 0) {
		MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_clnt_token == NULL)
			return (ENOMEM);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, cp->gss_clnt_tokenlen,
			cp->gss_clnt_token);
		if (error)
			return (EAUTH);
	}
	return (0);
}
/*
 * Called when an RPC has completed: retire the request's sequence
 * number(s).  The most recent seqnum's window bit is cleared (if it
 * is still inside the window), the per-request seqnum list is
 * trimmed to GSS_CLNT_SEQLISTMAX entries, and any thread blocked in
 * cred_put waiting for window space is woken.
 */
void
nfs_gss_clnt_rpcdone(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct gss_seq *gsp, *ngsp;
	int i = 0;

	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
		return;
	lck_mtx_lock(cp->gss_clnt_mtx);
	/* Newest seqnum is at the head of the list */
	gsp = SLIST_FIRST(&req->r_gss_seqlist);
	if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin))
		win_resetbit(cp->gss_clnt_seqbits,
			gsp->gss_seqnum % cp->gss_clnt_seqwin);
	/* Keep only the most recent GSS_CLNT_SEQLISTMAX seqnums */
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
		if (++i > GSS_CLNT_SEQLISTMAX) {
			SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
			FREE(gsp, M_TEMP);
		}
	}
	if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);
}
/*
 * Attach a context to a request and take a reference on it.
 * Paired with nfs_gss_clnt_ctx_unref().
 */
void
nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	req->r_gss_ctx = cp;
	lck_mtx_lock(cp->gss_clnt_mtx);
	cp->gss_clnt_refcnt++;
	lck_mtx_unlock(cp->gss_clnt_mtx);
}
/*
 * Detach the context from a request and drop the reference.  When
 * the last reference to an invalidated context goes away, the
 * context is removed from the mount's list and destroyed (under
 * nm_lock, if the mount still exists).
 */
void
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;

	if (cp == NULL)
		return;
	req->r_gss_ctx = NULL;
	lck_mtx_lock(cp->gss_clnt_mtx);
	if (--cp->gss_clnt_refcnt == 0
		&& cp->gss_clnt_flags & GSS_CTX_INVAL) {
		lck_mtx_unlock(cp->gss_clnt_mtx);
		if (nmp)
			lck_mtx_lock(&nmp->nm_lock);
		nfs_gss_clnt_ctx_remove(nmp, cp);
		if (nmp)
			lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);
}
/*
 * Unlink a context from the mount's list (caller holds nm_lock if
 * nmp is non-NULL) and free everything it owns: the gssd port
 * reference, its mutex, and all allocated buffers, then the context
 * itself.  Must only be called once the refcount has dropped to zero.
 */
static void
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp)
{
	if (nmp != NULL)
		TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
	if (cp->gss_clnt_mport)
		task_release_special_port(cp->gss_clnt_mport);
	if (cp->gss_clnt_mtx)
		lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp);
	if (cp->gss_clnt_handle)
		FREE(cp->gss_clnt_handle, M_TEMP);
	if (cp->gss_clnt_seqbits)
		FREE(cp->gss_clnt_seqbits, M_TEMP);
	if (cp->gss_clnt_token)
		FREE(cp->gss_clnt_token, M_TEMP);
	if (cp->gss_clnt_svcname)
		FREE(cp->gss_clnt_svcname, M_TEMP);
	FREE(cp, M_TEMP);
}
/*
 * Replace a request's (presumably expired) context with a fresh one
 * for the same uid: invalidate the old context, wake its waiters,
 * allocate a replacement reusing the saved gssd port, and run the
 * handshake.  ENEEDAUTH from the handshake triggers a delayed retry.
 * Returns 0 if the context was already invalid or renewal succeeded.
 */
int
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *ncp;
	int error = 0;
	uid_t saved_uid;
	mach_port_t saved_mport;
	int retrycnt = 0;

	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE))
		return (0);
	lck_mtx_lock(cp->gss_clnt_mtx);
	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		/* someone else already invalidated it; just drop our ref */
		lck_mtx_unlock(cp->gss_clnt_mtx);
		nfs_gss_clnt_ctx_unref(req);
		return (0);
	}
	/* Keep uid and an extra port right for the replacement context */
	saved_uid = cp->gss_clnt_uid;
	saved_mport = task_copy_special_port(cp->gss_clnt_mport);
	lck_mtx_lock(&nmp->nm_lock);
	cp->gss_clnt_flags |= GSS_CTX_INVAL;
	lck_mtx_unlock(&nmp->nm_lock);
	/*
	 * NOTE(review): both NEEDCTX and NEEDSEQ are tested but only
	 * NEEDSEQ is cleared before the wakeup — confirm NEEDCTX waiters
	 * are expected to re-check and clear it themselves.
	 */
	if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(cp->gss_clnt_mtx);
retry:
	MALLOC(ncp, struct nfs_gss_clnt_ctx *, sizeof(*ncp),
		M_TEMP, M_WAITOK|M_ZERO);
	if (ncp == NULL) {
		return (ENOMEM);
	}
	ncp->gss_clnt_uid = saved_uid;
	ncp->gss_clnt_mport = task_copy_special_port(saved_mport);	/* re-use the gssd port */
	ncp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL);
	ncp->gss_clnt_thread = current_thread();
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_INSERT_TAIL(&nmp->nm_gsscl, ncp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	/* Swap the request over to the new context, then initialize it */
	nfs_gss_clnt_ctx_unref(req);
	nfs_gss_clnt_ctx_ref(req, ncp);
	error = nfs_gss_clnt_ctx_init(req, ncp);
	if (error == ENEEDAUTH) {
		error = nfs_gss_clnt_ctx_delay(req, &retrycnt);
		if (!error)
			goto retry;
	}
	task_release_special_port(saved_mport);
	if (error)
		nfs_gss_clnt_ctx_unref(req);
	return (error);
}
/*
 * Tear down all GSS contexts on a mount at unmount time.  Unless the
 * unmount is forced, each real (non-SYS) context is first destroyed
 * server-side with an RPCSEC_GSS_DESTROY NULL-proc RPC issued under
 * a credential carrying the context's uid.  Each context is then
 * invalidated and unreferenced, which frees it once the last user
 * drops its reference.
 *
 * NOTE(review): the loop repeats while the list is non-empty; if a
 * context keeps an extra reference elsewhere it is not removed by
 * the unref and this could spin — confirm callers guarantee no
 * outstanding requests at this point.
 */
void
nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp, int mntflags)
{
	struct nfs_gss_clnt_ctx *cp;
	struct ucred temp_cred;
	kauth_cred_t cred;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	int error, status;
	struct nfsreq req;

	bzero((caddr_t) &temp_cred, sizeof(temp_cred));
	temp_cred.cr_ngroups = 1;
	req.r_nmp = nmp;
	for (;;) {
		lck_mtx_lock(&nmp->nm_lock);
		cp = TAILQ_FIRST(&nmp->nm_gsscl);
		lck_mtx_unlock(&nmp->nm_lock);
		if (cp == NULL)
			break;
		nfs_gss_clnt_ctx_ref(&req, cp);
		/* Politely destroy the server-side context state */
		if (!(mntflags & MNT_FORCE) && cp->gss_clnt_service != RPCSEC_GSS_SVC_SYS) {
			temp_cred.cr_uid = cp->gss_clnt_uid;
			cred = kauth_cred_create(&temp_cred);
			cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;
			error = 0;
			nfsm_chain_null(&nmreq);
			nfsm_chain_null(&nmrep);
			nfsm_chain_build_alloc_init(error, &nmreq, 0);
			nfsm_chain_build_done(error, &nmreq);
			if (!error)
				nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC_NULL,
					current_thread(), cred, 0, &nmrep, &xid, &status);
			nfsm_chain_cleanup(&nmreq);
			nfsm_chain_cleanup(&nmrep);
			kauth_cred_unref(&cred);
		}
		cp->gss_clnt_flags |= GSS_CTX_INVAL;
		nfs_gss_clnt_ctx_unref(&req);
	}
}
/*
 * Back off before retrying context establishment.  The delay grows
 * exponentially with the retry count (NFS_TRYLATERDEL << *retry),
 * capped at 60 seconds.  The wait is interruptible: returns an
 * nfs_sigintr error if the request is signalled, ETIMEDOUT once a
 * soft mount exceeds its retry limit, otherwise 0.  Increments
 * *retry before returning.
 */
static int
nfs_gss_clnt_ctx_delay(struct nfsreq *req, int *retry)
{
	int timeo;
	int error = 0;
	struct nfsmount *nmp = req->r_nmp;
	struct timeval now;
	time_t waituntil;

	if ((nmp->nm_flag & NFSMNT_SOFT) && *retry > nmp->nm_retry)
		return (ETIMEDOUT);
	/*
	 * Clamp the exponent before shifting: hard mounts can retry
	 * indefinitely, and (1 << *retry) is undefined behavior once
	 * *retry reaches the width of int.  For *retry >= 6 the delay
	 * would exceed the 60-second cap anyway.
	 */
	if (*retry >= 6)
		timeo = 60;
	else
		timeo = (1 << *retry) * NFS_TRYLATERDEL;
	if (timeo > 60)
		timeo = 60;

	microuptime(&now);
	waituntil = now.tv_sec + timeo;
	while (now.tv_sec < waituntil) {
		/* lbolt wakes roughly once per second */
		tsleep(&lbolt, PSOCK, "nfs_gss_clnt_ctx_delay", 0);
		error = nfs_sigintr(nmp, req, current_thread(), 0);
		if (error)
			break;
		microuptime(&now);
	}
	*retry += 1;
	return (error);
}
#endif
#if NFSSERVER
/*
 * Look up a server-side GSS context by its 32-bit handle.  Walks the
 * appropriate hash chain under the global service-context mutex.
 * Returns the matching context, or NULL if none exists.  No reference
 * is taken; the context-expiration timer is what eventually frees
 * entries.
 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
	struct nfs_gss_svc_ctx_hashhead *chain;
	struct nfs_gss_svc_ctx *ctx;

	chain = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
	for (ctx = LIST_FIRST(chain); ctx != NULL; ctx = LIST_NEXT(ctx, gss_svc_entries)) {
		if (ctx->gss_svc_handle == handle)
			break;
	}
	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
	return (ctx);
}
/*
 * Insert a new server-side context into the hash table and bump the
 * context count.  Starts the expiration timer if it isn't already
 * running, so stale contexts eventually get reaped.
 */
static void
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
	struct nfs_gss_svc_ctx_hashhead *head;

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
	nfs_gss_ctx_count++;
	if (!nfs_gss_timer_on) {
		nfs_gss_timer_on = 1;
		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
			GSS_TIMER_PERIOD * MSECS_PER_SEC);
	}
	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
/*
 * Periodic reaper for server-side GSS contexts: scan every hash
 * chain, free contexts whose expiration time has passed, recount the
 * survivors, and re-arm the timer only while contexts remain.
 * Runs as a thread_call; both parameters are unused.
 *
 * (Declared static in the prototype above; the definition now says
 * so too for consistency — linkage was already internal.)
 */
static void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp, *next;
	uint64_t timenow;
	int contexts = 0;
	int i;

	lck_mtx_lock(nfs_gss_svc_ctx_mutex);
	clock_get_uptime(&timenow);
	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
		head = &nfs_gss_svc_ctx_hashtbl[i];
		/* safe traversal: cp may be freed inside the loop */
		for (cp = LIST_FIRST(head); cp; cp = next) {
			contexts++;
			next = LIST_NEXT(cp, gss_svc_entries);
			if (timenow > cp->gss_svc_expiretime) {
				LIST_REMOVE(cp, gss_svc_entries);
				if (cp->gss_svc_seqbits)
					FREE(cp->gss_svc_seqbits, M_TEMP);
				lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
				FREE(cp, M_TEMP);
				contexts--;
			}
		}
	}
	nfs_gss_ctx_count = contexts;
	/* Keep the timer running only while there are live contexts */
	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
	if (nfs_gss_timer_on)
		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
			GSS_TIMER_PERIOD * MSECS_PER_SEC);
	lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
/*
 * Parse and validate the RPCSEC_GSS credential and verifier of an
 * incoming RPC request.  Looks up (or, for RPCSEC_GSS_INIT, allocates)
 * the server-side context, verifies the MIC over the RPC header,
 * builds the request credential from the context's uid/gids, and for
 * integrity/privacy services checks (and for privacy, decrypts) the
 * request arguments in place.  Returns 0, an errno, or an
 * NFSERR_AUTHERR-style auth error.
 */
int
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
	uint32_t vers, proc, seqnum, service;
	uint32_t handle, handle_len;
	struct nfs_gss_svc_ctx *cp = NULL;
	uint32_t flavor = 0, verflen = 0;
	int error = 0;
	uint32_t arglen, start, toklen, cksumlen;
	u_char tokbuf[KRB5_SZ_TOKMAX];
	u_char cksum1[8], cksum2[8];
	struct nfsm_chain nmc_tmp;

	vers = proc = seqnum = service = handle_len = 0;
	arglen = cksumlen = 0;

	/* Pull apart the RPCSEC_GSS credential */
	nfsm_chain_get_32(error, nmc, vers);
	if (vers != RPCSEC_GSS_VERS_1) {
		error = NFSERR_AUTHERR | AUTH_REJECTCRED;
		goto nfsmout;
	}
	nfsm_chain_get_32(error, nmc, proc);
	nfsm_chain_get_32(error, nmc, seqnum);
	nfsm_chain_get_32(error, nmc, service);
	nfsm_chain_get_32(error, nmc, handle_len);
	if (error)
		goto nfsmout;

	/* Only the NULL RPC procedure may carry context-setup calls */
	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		goto nfsmout;
	}
	if (seqnum > GSS_MAXSEQ) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
		goto nfsmout;
	}

	/* Map the GSS service level onto an RPC auth flavor */
	nd->nd_sec =
		service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 :
		service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
		service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0;

	if (proc == RPCSEC_GSS_INIT) {
		/* Limit the total number of server contexts */
		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}
		/* Allocate a fresh context for the new session */
		MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO);
		if (cp == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
	} else {
		/* Use the handle in the credential to find an existing context */
		if (handle_len != sizeof(handle)) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmc, handle);
		if (error)
			goto nfsmout;
		cp = nfs_gss_svc_ctx_find(handle);
		if (cp == NULL) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}
	}

	cp->gss_svc_proc = proc;

	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
		struct ucred temp_cred;

		/* A context with no sequence window was never established */
		if (cp->gss_svc_seqwin == 0) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		/* Drop replays and requests outside the sequence window */
		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
			error = EINVAL;
			goto nfsmout;
		}

		/* Checksum the RPC header from the start up to here (len 0) */
		nfs_gss_cksum_chain(cp->gss_svc_sched, nmc, krb5_mic, 0, 0, cksum1);

		/* The verifier must be a MIC token over that header */
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, verflen);
		if (flavor != RPCSEC_GSS || verflen != KRB5_SZ_TOKEN)
			error = NFSERR_AUTHERR | AUTH_BADVERF;
		nfsm_chain_get_opaque(error, nmc, verflen, tokbuf);
		if (error)
			goto nfsmout;

		/* Extract the checksum from the token and compare */
		error = nfs_gss_token_get(cp->gss_svc_sched, krb5_mic, tokbuf, 1,
			NULL, cksum2);
		if (error)
			goto nfsmout;
		if (bcmp(cksum1, cksum2, 8) != 0) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		nd->nd_gss_seqnum = seqnum;

		/* Build the request's credential from the context's uid/gids */
		bzero(&temp_cred, sizeof(temp_cred));
		temp_cred.cr_uid = cp->gss_svc_uid;
		bcopy(cp->gss_svc_gids, temp_cred.cr_groups,
			sizeof(gid_t) * cp->gss_svc_ngroups);
		temp_cred.cr_ngroups = cp->gss_svc_ngroups;
		nd->nd_cr = kauth_cred_create(&temp_cred);
		if (nd->nd_cr == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}

		/* Any use of the context extends its lifetime */
		clock_interval_to_deadline(GSS_CTX_EXPIRE, NSEC_PER_SEC,
			&cp->gss_svc_expiretime);

		/*
		 * The header checked out; now validate (and for privacy,
		 * decrypt) the call arguments themselves.
		 */
		switch (service) {
		case RPCSEC_GSS_SVC_NONE:
			/* nothing more to do */
			break;
		case RPCSEC_GSS_SVC_INTEGRITY:
			/* Checksum the args, then compare against the trailing MIC */
			nfsm_chain_get_32(error, nmc, arglen);
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}
			start = nfsm_chain_offset(nmc);
			nfs_gss_cksum_chain(cp->gss_svc_sched, nmc, krb5_mic, start, arglen, cksum1);
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Advance a scratch chain past the args to the MIC token */
			nmc_tmp = *nmc;
			arglen -= NFSX_UNSIGNED;	/* the seqnum just consumed */
			nfsm_chain_adv(error, &nmc_tmp, arglen);
			nfsm_chain_get_32(error, &nmc_tmp, cksumlen);
			if (cksumlen != KRB5_SZ_TOKEN) {
				error = EBADRPC;
				goto nfsmout;
			}
			nfsm_chain_get_opaque(error, &nmc_tmp, cksumlen, tokbuf);
			if (error)
				goto nfsmout;
			error = nfs_gss_token_get(cp->gss_svc_sched, krb5_mic, tokbuf, 1,
				NULL, cksum2);
			if (error || bcmp(cksum1, cksum2, 8) != 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			break;
		case RPCSEC_GSS_SVC_PRIVACY:
			/* Read the wrap token, then decrypt and checksum the args */
			nfsm_chain_get_32(error, nmc, arglen);
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}
			nfsm_chain_get_opaque(error, nmc, KRB5_SZ_TOKMAX, tokbuf);
			if (error)
				goto nfsmout;
			error = nfs_gss_token_get(cp->gss_svc_sched, krb5_wrap, tokbuf, 1,
				&toklen, cksum1);
			if (error)
				goto nfsmout;

			/* Back up over token padding, then decrypt in place */
			nfsm_chain_reverse(nmc, nfsm_pad(toklen));
			start = nfsm_chain_offset(nmc);
			arglen -= toklen;
			nfs_gss_encrypt_chain(cp->gss_svc_skey, nmc, start, arglen, DES_DECRYPT);
			nfs_gss_cksum_chain(cp->gss_svc_sched, nmc, krb5_wrap, start, arglen, cksum2);
			if (bcmp(cksum1, cksum2, 8) != 0) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Skip the 8-byte confounder, then check the seqnum */
			nfsm_chain_adv(error, nmc, 8);
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				error = EBADRPC;
				goto nfsmout;
			}
			break;
		}
	} else {
		/* Context-setup calls carry a null verifier */
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, verflen);
		if (error || flavor != RPCAUTH_NULL || verflen > 0)
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		if (error)
			goto nfsmout;
	}

	nd->nd_gss_context = cp;
nfsmout:
	return (error);
}
int
nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
struct nfs_gss_svc_ctx *cp;
int error = 0;
u_char tokbuf[KRB5_SZ_TOKEN];
int toklen;
u_char cksum[8];
cp = nd->nd_gss_context;
if (cp->gss_svc_major != GSS_S_COMPLETE) {
nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
nfsm_chain_add_32(error, nmc, 0);
return (error);
}
if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
nfs_gss_cksum_rep(cp->gss_svc_sched, cp->gss_svc_seqwin, cksum);
else
nfs_gss_cksum_rep(cp->gss_svc_sched, nd->nd_gss_seqnum, cksum);
toklen = nfs_gss_token_put(cp->gss_svc_sched, krb5_mic, tokbuf, 0, 0, cksum);
nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
nfsm_chain_add_32(error, nmc, toklen);
nfsm_chain_add_opaque(error, nmc, tokbuf, toklen);
return (error);
}
int
nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
int error = 0;
if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT)
return (0);
switch (nd->nd_sec) {
case RPCAUTH_KRB5:
break;
case RPCAUTH_KRB5I:
nd->nd_gss_mb = nmc->nmc_mcur; nfsm_chain_finish_mbuf(error, nmc); nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); break;
case RPCAUTH_KRB5P:
nd->nd_gss_mb = nmc->nmc_mcur; nfsm_chain_finish_mbuf(error, nmc); nfsm_chain_add_32(error, nmc, random()); nfsm_chain_add_32(error, nmc, random()); nfsm_chain_add_32(error, nmc, nd->nd_gss_seqnum); break;
}
return (error);
}
/*
 * Wrap the results of a reply for integrity (KRB5I) or privacy (KRB5P)
 * service.  nd_gss_mb (set by nfs_gss_svc_prepare_reply) marks the mbuf
 * after which the results begin; the results are checksummed (and for
 * privacy, padded and encrypted) and the token is spliced in between.
 */
int
nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep)
{
	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
	struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
	struct nfsm_chain nmrep_pre, *nmc_pre = &nmrep_pre;
	mbuf_t mb, results;
	uint32_t reslen;
	u_char tokbuf[KRB5_SZ_TOKMAX];
	int pad, toklen;
	u_char cksum[8];
	int error = 0;

	/* Split the reply at nd_gss_mb: head chain vs. results chain */
	nfs_gss_nfsm_chain(nmc_res, mrep);
	mb = nd->nd_gss_mb;
	results = mbuf_next(mb);
	reslen = nfs_gss_mchain_length(results);
	error = mbuf_setnext(mb, NULL);
	if (error)
		return (error);
	nfs_gss_nfsm_chain(nmc_pre, mb);

	if (nd->nd_sec == RPCAUTH_KRB5I) {
		/* Length precedes the results; a MIC token follows them */
		nfsm_chain_add_32(error, nmc_pre, reslen);
		nfsm_chain_build_done(error, nmc_pre);
		if (error)
			return (error);
		nfs_gss_append_chain(nmc_pre, results);
		nfs_gss_cksum_mchain(cp->gss_svc_sched, results, krb5_mic, 0, reslen, cksum);
		toklen = nfs_gss_token_put(cp->gss_svc_sched, krb5_mic, tokbuf, 0, 0, cksum);
		nfsm_chain_add_32(error, nmc_res, toklen);
		nfsm_chain_add_opaque(error, nmc_res, tokbuf, toklen);
		nfsm_chain_build_done(error, nmc_res);
	} else {
		/* RPCAUTH_KRB5P: pad the results to an 8-byte DES block boundary */
		if (reslen % 8 > 0) {
			nfsm_chain_add_32(error, nmc_res, 0x04040404);
			reslen += NFSX_UNSIGNED;
		} else {
			nfsm_chain_add_32(error, nmc_res, 0x08080808);
			nfsm_chain_add_32(error, nmc_res, 0x08080808);
			reslen += 2 * NFSX_UNSIGNED;
		}
		nfsm_chain_build_done(error, nmc_res);

		/* Checksum the padded results, emit the wrap token, then encrypt */
		nfs_gss_cksum_mchain(cp->gss_svc_sched, results, krb5_wrap, 0, reslen, cksum);
		toklen = nfs_gss_token_put(cp->gss_svc_sched, krb5_wrap, tokbuf, 0, reslen, cksum);
		nfsm_chain_add_32(error, nmc_pre, toklen + reslen);
		nfsm_chain_add_opaque_nopad(error, nmc_pre, tokbuf, toklen);
		nfsm_chain_build_done(error, nmc_pre);
		if (error)
			return (error);
		nfs_gss_append_chain(nmc_pre, results);
		nfs_gss_encrypt_mchain(cp->gss_svc_skey, results, 0, reslen, DES_ENCRYPT);

		/* XDR-pad the opaque (token + results) to a 4-byte boundary */
		pad = nfsm_pad(toklen + reslen);
		if (pad > 0) {
			nfsm_chain_add_opaque_nopad(error, nmc_pre, iv0, pad);
			nfsm_chain_build_done(error, nmc_pre);
		}
	}

	return (error);
}
/*
 * Handle an RPCSEC_GSS context-setup (or destroy) call: insert a new
 * context on INIT, pass the client's token to gssd, and build the
 * reply containing the handle, GSS status, window, and gssd's reply
 * token.  On an auth error the context is unhashed and destroyed.
 */
int
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
	struct nfs_gss_svc_ctx *cp = NULL;
	uint32_t handle = 0;
	int error = 0;
	int autherr = 0;
	struct nfsm_chain *nmreq, nmrep;
	int sz;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_null(&nmrep);
	*mrepp = NULL;
	cp = nd->nd_gss_context;
	nd->nd_repstat = 0;

	switch (cp->gss_svc_proc) {
	case RPCSEC_GSS_INIT:
		/* Pick a unique, nonzero handle for the new context */
		do {
			handle = random();
		} while (nfs_gss_svc_ctx_find(handle) != NULL || handle == 0);
		cp->gss_svc_handle = handle;
		cp->gss_svc_mtx = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL);
		/* Short expiration until the context is fully established */
		clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
			&cp->gss_svc_expiretime);
		nfs_gss_svc_ctx_insert(cp);
		/* FALLTHROUGH - INIT continues with the token exchange below */
	case RPCSEC_GSS_CONTINUE_INIT:
		/* Pull the client's token out of the request */
		nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
		if (cp->gss_svc_tokenlen == 0) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_svc_token == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

		/* Ask gssd to accept the token */
		error = nfs_gss_svc_gssd_upcall(cp);
		if (error) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			if (error == EAUTH)
				error = 0;
			break;
		}
		if (cp->gss_svc_major != GSS_S_COMPLETE)
			break;	/* more round trips needed */

		/* Established: set the full lifetime and the replay window */
		clock_interval_to_deadline(GSS_CTX_EXPIRE, NSEC_PER_SEC,
			&cp->gss_svc_expiretime);
		cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
		MALLOC(cp->gss_svc_seqbits, uint32_t *,
			nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO);
		if (cp->gss_svc_seqbits == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		/* Expand the session key into a DES key schedule */
		error = des_key_sched((des_cblock *) cp->gss_svc_skey, cp->gss_svc_sched);
		if (error) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			error = 0;
			break;
		}
		break;
	case RPCSEC_GSS_DATA:
		/* nothing to do for a data call here */
		break;
	case RPCSEC_GSS_DESTROY:
		/* Mark the context destroyed by zeroing its handle */
		cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
		if (cp != NULL) {
			cp->gss_svc_handle = 0;
			lck_mtx_lock(cp->gss_svc_mtx);
			clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
				&cp->gss_svc_expiretime);
			lck_mtx_unlock(cp->gss_svc_mtx);
		}
		break;
	default:
		autherr = RPCSEC_GSS_CREDPROBLEM;
		break;
	}

	if (nd->nd_repstat == 0)
		nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;

	/* Build the reply: handle, major/minor, window, and reply token */
	sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen);
	error = nfsrv_rephead(nd, slp, &nmrep, sz);
	*mrepp = nmrep.nmc_mhead;
	if (error || autherr)
		goto nfsmout;

	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
		cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
		nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
		nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
		if (cp->gss_svc_token != NULL) {
			FREE(cp->gss_svc_token, M_TEMP);
			cp->gss_svc_token = NULL;
		}
	}

nfsmout:
	if (autherr != 0) {
		/* Setup failed: unhash and destroy the context */
		LIST_REMOVE(cp, gss_svc_entries);
		if (cp->gss_svc_seqbits != NULL)
			FREE(cp->gss_svc_seqbits, M_TEMP);
		if (cp->gss_svc_token != NULL)
			FREE(cp->gss_svc_token, M_TEMP);
		lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
		FREE(cp, M_TEMP);
	}

	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		nfsm_chain_cleanup(&nmrep);
		*mrepp = NULL;
	}
	return (error);
}
/*
 * Make a Mach upcall to gssd to accept the client's context-setup
 * token.  On success gssd fills in the session key, a reply token
 * for the client, the user's uid/gids, and the GSS major/minor
 * status.  Returns 0, EAUTH, or ENOMEM.
 */
static int
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
{
	kern_return_t kr;
	mach_port_t mp;
	int retry_cnt = 0;
	byte_buffer okey = NULL;
	uint32_t skeylen = 0;
	vm_map_copy_t itoken = NULL;
	byte_buffer otoken = NULL;
	int error = 0;
	char svcname[] = "nfs";

	kr = task_get_gssd_port(get_threadtask(current_thread()), &mp);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status 0x%08x\n", kr);
		return (EAUTH);
	}
	if (!IPC_PORT_VALID(mp)) {
		/* NOTE(review): returns without task_release_special_port(mp) - verify no send-right leak */
		printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
		return (EAUTH);
	}

	/* Copy the client's token into an out-of-line Mach buffer */
	if (cp->gss_svc_tokenlen > 0)
		nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);

retry:
	kr = mach_gss_accept_sec_context(
		mp,
		(byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
		svcname,
		0,
		&cp->gss_svc_gssd_verf,
		&cp->gss_svc_context,
		&cp->gss_svc_cred_handle,
		&cp->gss_svc_uid,
		cp->gss_svc_gids,
		&cp->gss_svc_ngroups,
		&okey, (mach_msg_type_number_t *) &skeylen,
		&otoken, (mach_msg_type_number_t *) &cp->gss_svc_tokenlen,
		&cp->gss_svc_major,
		&cp->gss_svc_minor);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall failed: %d\n", kr);
		/* Retry a bounded number of times if gssd died mid-setup */
		if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
			retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES)
			goto retry;
		task_release_special_port(mp);
		return (EAUTH);
	}
	task_release_special_port(mp);

	/* Copy out the session key, if gssd returned one */
	if (skeylen > 0) {
		if (skeylen != SKEYLEN) {
			printf("nfs_gss_svc_gssd_upcall: bad key length (%d)\n", skeylen);
			return (EAUTH);
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) okey, skeylen, cp->gss_svc_skey);
		if (error)
			return (EAUTH);
	}

	/* Copy out the reply token destined for the client */
	if (cp->gss_svc_tokenlen > 0) {
		MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK);
		if (cp->gss_svc_token == NULL)
			return (ENOMEM);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, cp->gss_svc_tokenlen,
			cp->gss_svc_token);
		if (error)
			return (EAUTH);
	}

	return (kr);	/* KERN_SUCCESS == 0 here */
}
/*
 * Validate an incoming request's sequence number against the context's
 * replay window.  Returns 1 if the sequence number is acceptable (and
 * records it), 0 if it is below the window or a replay.
 */
static int
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
{
	uint32_t *bits = cp->gss_svc_seqbits;
	uint32_t win = cp->gss_svc_seqwin;
	uint32_t i;

	lck_mtx_lock(cp->gss_svc_mtx);

	/*
	 * Above the window: slide the window up to 'seq', clearing the
	 * bits for any sequence numbers skipped over, and record 'seq'.
	 */
	if (seq > cp->gss_svc_seqmax) {
		if (seq - cp->gss_svc_seqmax > win)
			bzero(bits, nfsm_rndup((win + 7) / 8));
		else
			for (i = cp->gss_svc_seqmax + 1; i < seq; i++)
				win_resetbit(bits, i % win);
		win_setbit(bits, seq % win);
		cp->gss_svc_seqmax = seq;
		lck_mtx_unlock(cp->gss_svc_mtx);
		return (1);
	}

	/*
	 * Below the window: too old, reject.
	 *
	 * Guard the subtraction: while gss_svc_seqmax is still smaller
	 * than the window, "seqmax - win" underflows (unsigned) to a
	 * huge value, which made the unguarded test reject every
	 * in-window retransmission early in the context's life.
	 */
	if (cp->gss_svc_seqmax >= win && seq <= cp->gss_svc_seqmax - win) {
		lck_mtx_unlock(cp->gss_svc_mtx);
		return (0);
	}

	/* In the window: reject replays */
	if (win_getbit(bits, seq % win)) {
		lck_mtx_unlock(cp->gss_svc_mtx);
		return (0);
	}

	/* Mark the sequence number as seen */
	win_setbit(bits, seq % win);
	lck_mtx_unlock(cp->gss_svc_mtx);
	return (1);
}
void
nfs_gss_svc_cleanup(void)
{
struct nfs_gss_svc_ctx_hashhead *head;
struct nfs_gss_svc_ctx *cp, *ncp;
int i;
lck_mtx_lock(nfs_gss_svc_ctx_mutex);
for (i = 0; i < SVC_CTX_HASHSZ; i++) {
head = &nfs_gss_svc_ctx_hashtbl[i];
LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
LIST_REMOVE(cp, gss_svc_entries);
if (cp->gss_svc_seqbits)
FREE(cp->gss_svc_seqbits, M_TEMP);
lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp);
FREE(cp, M_TEMP);
}
}
lck_mtx_unlock(nfs_gss_svc_ctx_mutex);
}
#endif
extern void ipc_port_release_send(ipc_port_t);
extern ipc_port_t ipc_port_copy_send(ipc_port_t);
/*
 * Release the send right on a task special port (e.g. the gssd port
 * returned by task_get_gssd_port()).
 */
static void
task_release_special_port(mach_port_t mp)
{
	ipc_port_release_send(mp);
}
/*
 * Duplicate the send right on a task special port.
 */
static mach_port_t
task_copy_special_port(mach_port_t mp)
{
	return ipc_port_copy_send(mp);
}
/*
 * Allocate a page-aligned, wired kernel buffer, copy the caller's
 * data into it, and package it as an out-of-line vm_map_copy_t
 * suitable for passing to a MIG upcall.  On any failure *addr is
 * left NULL and the failure is logged.
 */
static void
nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr)
{
	kern_return_t kr;
	vm_offset_t kmem_buf;
	vm_size_t tbuflen;

	*addr = NULL;
	if (buf == NULL || buflen == 0)
		return;

	tbuflen = round_page(buflen);
	kr = vm_allocate(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE);
	if (kr != 0) {
		printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
		return;
	}

	/*
	 * Wire the pages before copying in.  The original code ignored
	 * this result and went on to bcopy into possibly-unwired memory.
	 */
	kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
		vm_map_round_page(kmem_buf + tbuflen),
		VM_PROT_READ|VM_PROT_WRITE, FALSE);
	if (kr != 0) {
		printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n");
		vm_deallocate(ipc_kernel_map, kmem_buf, tbuflen);
		return;
	}

	bcopy(buf, (void *) kmem_buf, buflen);

	kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(kmem_buf),
		vm_map_round_page(kmem_buf + tbuflen), FALSE);
	if (kr != 0) {
		printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
		return;
	}

	/* src_destroy=TRUE: ownership of the pages moves to the copy object */
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
		(vm_map_size_t) buflen, TRUE, addr);
	if (kr != 0) {
		printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
		return;
	}

	/* Free the tail of the last page not covered by the copyin */
	if (buflen != tbuflen)
		kmem_free(ipc_kernel_map, kmem_buf + buflen, tbuflen - buflen);
}
/*
 * Copy an out-of-line Mach buffer (vm_map_copy_t) received from gssd
 * into a kernel buffer of 'len' bytes, then release the mapping.
 * Returns 0 or the vm_map_copyout() error.
 */
static int
nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
{
	vm_map_offset_t map_data;
	vm_offset_t data;
	int error;

	error = vm_map_copyout(ipc_kernel_map, &map_data, in);
	if (error)
		return (error);

	data = CAST_DOWN(vm_offset_t, map_data);
	bcopy((void *) data, out, len);
	vm_deallocate(ipc_kernel_map, data, len);

	return (0);
}
/*
 * Build a Kerberos 5 MIC or wrap token (selected by the alg bytes)
 * into buffer p: ASN.1 application header, mechanism OID, alg bytes,
 * an encrypted 8-byte sequence field, and the 8-byte checksum.
 * Returns the number of bytes written.
 */
static int
nfs_gss_token_put(
	des_key_schedule sched,
	u_char *alg,
	u_char *p,
	int initiator,
	int datalen,
	u_char *cksum)
{
	/* NOTE(review): file-scope counter with no visible locking - confirm callers serialize */
	static uint32_t seqnum = 0;
	u_char *psave = p;
	u_char plain[8];
	int toklen, i;

	/* ASN.1 application tag, then the DER length of what follows */
	*p++ = 0x060;
	toklen = KRB5_SZ_MECH + KRB5_SZ_ALG + KRB5_SZ_SEQ + KRB5_SZ_CKSUM;
	nfs_gss_der_length_put(&p, toklen + datalen);

	/* Kerberos 5 mechanism OID */
	bcopy(krb5_mech, p, sizeof(krb5_mech));
	p += sizeof(krb5_mech);

	/* Algorithm / token-id bytes (MIC vs wrap) */
	bcopy(alg, p, KRB5_SZ_ALG);
	p += KRB5_SZ_ALG;

	/*
	 * Sequence field: 4 bytes of seqnum (LSB first) plus 4 direction
	 * bytes (0x00 initiator / 0xff acceptor), DES-CBC encrypted with
	 * the checksum as the IV.
	 */
	seqnum++;
	for (i = 0; i < 4; i++)
		plain[i] = (u_char) ((seqnum >> (i * 8)) & 0xff);
	for (i = 4; i < 8; i++)
		plain[i] = initiator ? 0x00 : 0xff;
	des_cbc_encrypt((des_cblock *) plain, (des_cblock *) p, 8,
		sched, (des_cblock *) cksum, NULL, DES_ENCRYPT);
	p += 8;

	/* Finally, the checksum itself */
	bcopy(cksum, p, 8);
	p += 8;

	return (p - psave);
}
/*
 * Return the number of octets required to DER-encode the given length:
 * one octet for short form (< 128), otherwise a count octet followed
 * by 1-4 big-endian length octets.
 */
static int
nfs_gss_der_length_size(int len)
{
	if (len < (1 << 7))
		return (1);
	if (len < (1 << 8))
		return (2);
	if (len < (1 << 16))
		return (3);
	if (len < (1 << 24))
		return (4);
	return (5);
}
/*
 * DER-encode a length into the buffer at *pp, advancing *pp past the
 * encoded octets.  Short form for len < 128; long form (0x80 | count,
 * then big-endian length octets) otherwise.
 */
static void
nfs_gss_der_length_put(u_char **pp, int len)
{
	u_char *out = *pp;
	int octets = nfs_gss_der_length_size(len);

	if (octets == 1) {
		/* Short form: the length itself */
		*out++ = (u_char) len;
	} else {
		/* Long form: count octet, then the length big-endian */
		octets--;
		*out++ = (u_char) (octets | 0x80);
		while (octets--)
			*out++ = (u_char) ((len >> (octets * 8)) & 0xff);
	}

	*pp = out;
}
/*
 * Decode a DER length at *pp, advancing *pp past the encoding.
 * Returns the decoded length, or -1 if the long-form octet count
 * exceeds what fits in a uint32_t.
 */
static int
nfs_gss_der_length_get(u_char **pp)
{
	u_char *in = *pp;
	uint32_t count, len;

	count = *in & 0x7f;
	if ((*in++ & 0x80) == 0) {
		/* Short form: the low 7 bits are the length */
		len = count;
	} else {
		/* Long form: 'count' big-endian octets follow */
		if (count > sizeof(uint32_t))
			return (-1);
		len = 0;
		while (count--)
			len = (len << 8) + *in++;
	}

	*pp = in;
	return (len);
}
/*
 * Parse a Kerberos 5 MIC or wrap token (selected by the alg bytes),
 * returning its 8-byte checksum in cksum and, if len is non-NULL, the
 * number of token bytes consumed.  Returns 0 on success, AUTH_BADCRED
 * if the token is malformed.
 */
static int
nfs_gss_token_get(
	des_key_schedule sched,
	u_char *alg,
	u_char *p,
	int initiator,
	uint32_t *len,
	u_char *cksum)
{
	u_char d, plain[8];
	u_char *psave = p;
	int seqnum, i;

	/* The token must start with the ASN.1 application tag */
	if (*p++ != 0x60)
		return (AUTH_BADCRED);
	(void) nfs_gss_der_length_get(&p);

	/*
	 * Verify the mechanism OID.  The original code misplaced a
	 * parenthesis - bcmp(p, krb5_mech, sizeof(krb5_mech) != 0) -
	 * so the length argument evaluated to 1 and only the first
	 * byte of the OID was ever compared.
	 */
	if (bcmp(p, krb5_mech, sizeof(krb5_mech)) != 0)
		return (AUTH_BADCRED);
	p += sizeof(krb5_mech);

	/* Verify the algorithm / token-id bytes (MIC vs wrap) */
	if (bcmp(p, alg, KRB5_SZ_ALG) != 0)
		return (AUTH_BADCRED);
	p += KRB5_SZ_ALG;

	/*
	 * The 8-byte sequence field is DES-CBC encrypted with the first
	 * 8 checksum bytes as the IV.  Decrypt it and check the four
	 * direction bytes (0x00 from initiator, 0xff from acceptor).
	 */
	seqnum = 0;
	des_cbc_encrypt((des_cblock *) p, (des_cblock *) plain, 8,
		sched, (des_cblock *) (p + 8), NULL, DES_DECRYPT);
	p += 8;
	for (i = 0; i < 4; i++)
		seqnum |= plain[i] << (i * 8);	/* decoded but unused here */
	d = initiator ? 0x00 : 0xff;
	for (i = 4; i < 8; i++)
		if (plain[i] != d)
			return (AUTH_BADCRED);

	/* Hand back the checksum and, if requested, the token length */
	bcopy(p, cksum, 8);
	p += 8;
	if (len != NULL)
		*len = p - psave;

	return (0);
}
/*
 * Return the total number of data bytes in an mbuf chain.
 */
static int
nfs_gss_mchain_length(mbuf_t mhead)
{
	int total = 0;
	mbuf_t m;

	for (m = mhead; m != NULL; m = mbuf_next(m))
		total += mbuf_len(m);

	return (total);
}
/*
 * Append an mbuf chain to an nfsm_chain and reposition the chain's
 * current pointer at the end of the appended mbufs.  Returns 0 or
 * the mbuf_setnext() error.
 */
static int
nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
{
	mbuf_t m, last = NULL;
	int error;

	error = mbuf_setnext(nmc->nmc_mcur, mc);
	if (error)
		return (error);

	/* Find the last mbuf of the appended chain */
	for (m = mc; m != NULL; m = mbuf_next(m))
		last = m;

	nmc->nmc_mcur = last;
	nmc->nmc_ptr = (caddr_t) mbuf_data(last) + mbuf_len(last);
	nmc->nmc_left = mbuf_trailingspace(last);

	return (0);
}
/*
 * Initialize an nfsm_chain over an existing mbuf chain, positioned
 * at the end of the last mbuf's data (ready for appending).
 */
static void
nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
{
	mbuf_t m, last = NULL;

	/* Find the last mbuf in the chain */
	for (m = mc; m != NULL; m = mbuf_next(m))
		last = m;

	nmc->nmc_mhead = mc;
	nmc->nmc_mcur = last;
	nmc->nmc_ptr = (caddr_t) mbuf_data(last) + mbuf_len(last);
	nmc->nmc_left = mbuf_trailingspace(last);
	nmc->nmc_flags = 0;
}
/*
 * Compute the Kerberos MD5-then-DES-CBC-MAC checksum over
 * [offset, offset+len) of an mbuf chain, prefixed with the token's
 * alg bytes.  The 8-byte result is written to cksum.
 */
static void
nfs_gss_cksum_mchain(
	des_key_schedule sched,
	mbuf_t mhead,
	u_char *alg,
	int offset,
	int len,
	u_char *cksum)
{
	mbuf_t mb;
	u_char *ptr;
	int left, bytes;
	MD5_CTX context;
	u_char digest[16];

	MD5Init(&context);

	/* The digest covers the alg bytes plus the selected data */
	MD5Update(&context, alg, KRB5_SZ_ALG);

	/* Walk the chain: skip 'offset' bytes, digest 'len' bytes */
	for (mb = mhead; mb && len > 0; mb = mbuf_next(mb)) {
		ptr = mbuf_data(mb);
		left = mbuf_len(mb);
		if (offset >= left) {
			/* this mbuf lies entirely before the region */
			offset -= left;
			continue;
		}
		ptr += offset;
		left -= offset;
		offset = 0;
		bytes = left < len ? left : len;
		if (bytes > 0)
			MD5Update(&context, ptr, bytes);
		len -= bytes;
	}

	MD5Final(digest, &context);

	/* DES-CBC MAC over the MD5 digest yields the 8-byte checksum */
	(void) des_cbc_cksum((des_cblock *) digest, (des_cblock *) cksum,
		sizeof(digest), sched, (des_cblock *) iv0);
}
/*
 * Compute the checksum over a region of an nfsm_chain's mbufs.
 * A zero len means "from offset up to the chain's current offset".
 */
static void
nfs_gss_cksum_chain(
	des_key_schedule sched,
	struct nfsm_chain *nmc,
	u_char *alg,
	int offset,
	int len,
	u_char *cksum)
{
	if (len == 0)
		len = nfsm_chain_offset(nmc) - offset;

	/*
	 * Plain call, not "return (expr)": returning an expression from
	 * a void function is a C constraint violation (C99 6.8.6.4).
	 */
	nfs_gss_cksum_mchain(sched, nmc->nmc_mhead, alg, offset, len, cksum);
}
/*
 * Compute the verifier checksum for a reply: MD5 over the MIC alg
 * bytes and the big-endian sequence number, then a DES-CBC MAC over
 * the digest.  The 8-byte result is written to cksum.
 */
static void
nfs_gss_cksum_rep(des_key_schedule sched, uint32_t seqnum, u_char *cksum)
{
	MD5_CTX context;
	u_char digest[16];
	uint32_t val = htonl(seqnum);

	MD5Init(&context);
	MD5Update(&context, krb5_mic, KRB5_SZ_ALG);
	MD5Update(&context, (u_char *) &val, 4);
	MD5Final(digest, &context);

	(void) des_cbc_cksum((des_cblock *) digest, (des_cblock *) cksum,
		sizeof(digest), sched, (des_cblock *) iv0);
}
/*
 * DES-CBC encrypt or decrypt [offset, offset+len) of an mbuf chain in
 * place.  The session key is XORed with 0xf0 first (the krb5 wrap-token
 * key variant).  8-byte cipher blocks that straddle an mbuf boundary
 * are gathered into a temporary block, processed, and scattered back.
 */
static void
nfs_gss_encrypt_mchain(
	u_char *key,
	mbuf_t mhead,
	int offset,
	int len,
	int encrypt)
{
	des_key_schedule sched;
	mbuf_t mb, mbn;
	u_char *ptr, *nptr;
	u_char tmp[8], ivec[8];
	int i, left, left8, remain;

	/* Derive the wrap variant of the key: each byte XOR 0xf0 */
	for (i = 0; i < 8; i++)
		tmp[i] = key[i] ^ 0xf0;
	bzero(ivec, 8);
	(void) des_key_sched((des_cblock *) tmp, sched);

	for (mb = mhead; mb && len > 0; mb = mbn) {
		mbn = mbuf_next(mb);
		ptr = mbuf_data(mb);
		left = mbuf_len(mb);
		if (offset >= left) {
			/* this mbuf lies entirely before the region */
			offset -= left;
			continue;
		}
		ptr += offset;
		left -= offset;
		offset = 0;

		/* Process the whole 8-byte blocks within this mbuf */
		remain = left % 8;
		left8 = left - remain;
		left = left8 < len ? left8 : len;
		if (left > 0) {
			des_cbc_encrypt((des_cblock *) ptr, (des_cblock *) ptr, left, sched,
				(des_cblock *) ivec, (des_cblock *) ivec, encrypt);
			len -= left;
		}

		/*
		 * A block straddles this mbuf and the next: gather it into
		 * tmp, process it, and scatter the result back.
		 */
		if (mbn && remain > 0) {
			nptr = mbuf_data(mbn);
			offset = 8 - remain;	/* bytes taken from the next mbuf */
			bcopy(ptr + left, tmp, remain);
			bcopy(nptr, tmp + remain, offset);
			des_cbc_encrypt((des_cblock *) tmp, (des_cblock *) tmp, 8, sched,
				(des_cblock *) ivec, (des_cblock *) ivec, encrypt);
			bcopy(tmp, ptr + left, remain);
			bcopy(tmp + remain, nptr, offset);
			len -= 8;
		}
	}
}
/*
 * Encrypt or decrypt a region of an nfsm_chain's mbufs in place.
 * A zero len means "from offset up to the chain's current offset".
 */
static void
nfs_gss_encrypt_chain(
	u_char *key,
	struct nfsm_chain *nmc,
	int offset,
	int len,
	int encrypt)
{
	if (len == 0)
		len = nfsm_chain_offset(nmc) - offset;

	/*
	 * Plain call, not "return (expr)": returning an expression from
	 * a void function is a C constraint violation (C99 6.8.6.4).
	 */
	nfs_gss_encrypt_mchain(key, nmc->nmc_mhead, offset, len, encrypt);
}
/*
 * DES-CBC MAC over 'length' bytes of input (OpenSSL-derived, K&R
 * style).  Writes the final 8-byte cipher block to *output (if
 * non-NULL) and returns its upper half.
 */
static DES_LONG
des_cbc_cksum(input, output, length, schedule, ivec)
	des_cblock (*input);
	des_cblock (*output);
	long length;
	des_key_schedule schedule;
	des_cblock (*ivec);
{
	register unsigned long tout0,tout1,tin0,tin1;
	register long l=length;
	unsigned long tin[2];
	unsigned char *in,*out,*iv;

	in=(unsigned char *)input;
	out=(unsigned char *)output;
	iv=(unsigned char *)ivec;

	/* Load the IV (c2l advances its pointer argument) */
	c2l(iv,tout0);
	c2l(iv,tout1);

	/* CBC: XOR each block with the previous ciphertext, then encrypt */
	for (; l>0; l-=8) {
		if (l >= 8) {
			c2l(in,tin0);
			c2l(in,tin1);
		} else
			c2ln(in,tin0,tin1,l);	/* short final block, zero-padded */
		tin0^=tout0; tin[0]=tin0;
		tin1^=tout1; tin[1]=tin1;
		des_encrypt1((DES_LONG *)tin,schedule,DES_ENCRYPT);
		tout0=tin[0];
		tout1=tin[1];
	}
	if (out != NULL) {
		l2c(tout0,out);
		l2c(tout1,out);
	}
	/* Scrub intermediates (tout1 survives as the return value) */
	tout0=tin0=tin1=tin[0]=tin[1]=0;
	return(tout1);
}
/*
 * DES-CBC encrypt/decrypt (OpenSSL-derived, K&R style).  'length'
 * need not be a multiple of 8: the final short block is zero-padded
 * on encrypt and only partially stored on decrypt.  If retvec is
 * non-NULL it receives the final CBC state so successive calls can
 * continue the stream.
 */
static void
des_cbc_encrypt(input, output, length, schedule, ivec, retvec, encrypt)
	des_cblock (*input);
	des_cblock (*output);
	long length;
	des_key_schedule schedule;
	des_cblock (*ivec);
	des_cblock (*retvec);
	int encrypt;
{
	register unsigned long tin0,tin1;
	register unsigned long tout0,tout1,xor0,xor1;
	register unsigned char *in,*out,*retval;
	register long l=length;
	unsigned long tin[2];
	unsigned char *iv;

	tin0 = tin1 = 0;
	in=(unsigned char *)input;
	out=(unsigned char *)output;
	retval=(unsigned char *)retvec;
	iv=(unsigned char *)ivec;

	if (encrypt) {
		/* Load the IV (c2l advances its pointer argument) */
		c2l(iv,tout0);
		c2l(iv,tout1);
		/* Full blocks: XOR with previous ciphertext, then encrypt */
		for (l-=8; l>=0; l-=8) {
			c2l(in,tin0);
			c2l(in,tin1);
			tin0^=tout0; tin[0]=tin0;
			tin1^=tout1; tin[1]=tin1;
			des_encrypt1((DES_LONG *)tin,schedule,DES_ENCRYPT);
			tout0=tin[0]; l2c(tout0,out);
			tout1=tin[1]; l2c(tout1,out);
		}
		/* Short final block, zero-padded */
		if (l != -8) {
			c2ln(in,tin0,tin1,l+8);
			tin0^=tout0; tin[0]=tin0;
			tin1^=tout1; tin[1]=tin1;
			des_encrypt1((DES_LONG *)tin,schedule,DES_ENCRYPT);
			tout0=tin[0]; l2c(tout0,out);
			tout1=tin[1]; l2c(tout1,out);
		}
		if (retval) {
			/* Return the final ciphertext block as the new IV */
			l2c(tout0,retval);
			l2c(tout1,retval);
		}
	} else {
		c2l(iv,xor0);
		c2l(iv,xor1);
		/* Full blocks: decrypt, then XOR with previous ciphertext */
		for (l-=8; l>=0; l-=8) {
			c2l(in,tin0); tin[0]=tin0;
			c2l(in,tin1); tin[1]=tin1;
			des_encrypt1((DES_LONG *)tin,schedule,DES_DECRYPT);
			tout0=tin[0]^xor0;
			tout1=tin[1]^xor1;
			l2c(tout0,out);
			l2c(tout1,out);
			xor0=tin0;
			xor1=tin1;
		}
		/* Short final block: store only the remaining bytes */
		if (l != -8) {
			c2l(in,tin0); tin[0]=tin0;
			c2l(in,tin1); tin[1]=tin1;
			des_encrypt1((DES_LONG *)tin,schedule,DES_DECRYPT);
			tout0=tin[0]^xor0;
			tout1=tin[1]^xor1;
			l2cn(tout0,tout1,out,l+8);
		}
		if (retval) {
			/* Return the last ciphertext block as the new IV */
			l2c(tin0,retval);
			l2c(tin1,retval);
		}
	}
	/* Scrub intermediates */
	tin0=tin1=tout0=tout1=xor0=xor1=0;
	tin[0]=tin[1]=0;
}