#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/proc.h>
#ifndef __APPLE__
#include <sys/jail.h>
#endif
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/kauth.h>
#include <sys/priv.h>
#include <libkern/OSAtomic.h>
#include <kern/locks.h>
#include <machine/limits.h>
#ifdef __APPLE__
#include <kern/zalloc.h>
#endif
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/flowhash.h>
#include <net/flowadv.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#if IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif
#include <sys/kdebug.h>
#include <sys/random.h>
#include <dev/random/randomdev.h>
#if IPSEC
extern int ipsec_bypass;
#endif
/* kdebug trace codes used by the PCB lookup routines below. */
#define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8))
#define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1))

/* All-zeroes IPv4 address (INADDR_ANY) convenience constant. */
struct in_addr zeroin_addr;

/*
 * Ephemeral port ranges used by in_pcbbind() when it auto-assigns a
 * local port: the "low" pair serves INP_LOWPORT (privileged) sockets,
 * the "hi" pair serves INP_HIGHPORT, and first/last is the default
 * range.  All six are tunable via the net.inet.ip.portrange sysctls
 * declared below.
 */
int ipport_lowfirstauto = IPPORT_RESERVED - 1;
int ipport_lowlastauto = IPPORT_RESERVEDSTART;
#ifndef __APPLE__
int ipport_firstauto = IPPORT_RESERVED;
int ipport_lastauto = IPPORT_USERRESERVED;
#else
int ipport_firstauto = IPPORT_HIFIRSTAUTO;
int ipport_lastauto = IPPORT_HILASTAUTO;
#endif
int ipport_hifirstauto = IPPORT_HIFIRSTAUTO;
int ipport_hilastauto = IPPORT_HILASTAUTO;

/*
 * Clamp var into [min, max].  Wrapped in do { } while (0) so the macro
 * expands to a single statement; the previous bare if/else-if form was
 * a dangling-else hazard if ever used inside an unbraced if.
 */
#define RANGECHK(var, min, max) do {				\
	if ((var) < (min)) { (var) = (min); }			\
	else if ((var) > (max)) { (var) = (max); }		\
} while (0)
/*
 * sysctl handler for the net.inet.ip.portrange.* knobs.  After the
 * generic integer handler stores the new value, every port-range bound
 * is re-clamped so the tunables always remain within legal limits.
 */
static int
sysctl_net_ipport_check SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error) {
		/* Clamp all six tunables, not just the one written. */
		RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
		RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
		RANGECHK(ipport_firstauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_lastauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX);
		RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX);
	}
	return error;
}
#undef RANGECHK

/*
 * net.inet.ip.portrange.*: expose the six ephemeral port-range bounds,
 * all funneled through sysctl_net_ipport_check() so the stored values
 * are re-clamped on every write.
 */
SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IP Ports");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", "");
SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", "");
/* Port-randomization switches, defined elsewhere (udp/tcp code). */
extern int udp_use_randomport;
extern int tcp_use_randomport;

/*
 * Address half of a flow-hash key: a union large enough to hold either
 * an IPv4 or an IPv6 address, with byte/short/word overlays.
 */
struct inp_flowhash_key_addr {
	union {
		struct in_addr v4;
		struct in6_addr v6;
		u_int8_t addr8[16];
		u_int16_t addr16[8];
		u_int32_t addr32[4];
	} infha;
};

/* Full tuple hashed to derive a pcb's flow hash. */
struct inp_flowhash_key {
	struct inp_flowhash_key_addr infh_laddr;	/* local address */
	struct inp_flowhash_key_addr infh_faddr;	/* foreign address */
	u_int32_t infh_lport;				/* local port */
	u_int32_t infh_fport;				/* foreign port */
	u_int32_t infh_af;				/* address family */
	u_int32_t infh_proto;				/* IP protocol */
	u_int32_t infh_rand1;				/* random salt */
	u_int32_t infh_rand2;				/* random salt */
};

/* Flow-hash salt; 0 until initialized (seeding not visible in this chunk). */
u_int32_t inp_hash_seed = 0;

static __inline int infc_cmp(const struct inp_fc_entry *,
    const struct inp_fc_entry *);

/* Lock group/attr and mutex protecting the flow-control tree below. */
lck_grp_t *inp_lck_grp;
lck_grp_attr_t *inp_lck_grp_attr;
lck_attr_t *inp_lck_attr;
decl_lck_mtx_data(, inp_fc_lck);

/* Red-black tree of flow-control (flow advisory) entries. */
RB_HEAD(inp_fc_tree, inp_fc_entry) inp_fc_tree;
RB_PROTOTYPE(inp_fc_tree, inp_fc_entry, infc_link, infc_cmp);
RB_GENERATE(inp_fc_tree, inp_fc_entry, infc_link, infc_cmp);

/* Zone backing struct inp_fc_entry allocations. */
static unsigned int inp_fcezone_size;
static struct zone *inp_fcezone;
#define INP_FCEZONE_NAME "inp_fcezone"
#define INP_FCEZONE_MAX 32
/*
 * One-time initialization of the flow-advisory machinery: set up the
 * lock group/attributes and the inp_fc_lck mutex, initialize the
 * red-black tree of flow-control entries, and create the zone from
 * which inp_fc_entry structures are allocated.  Panics if the zone
 * cannot be created.
 */
void
socket_flowadv_init(void)
{
	inp_lck_grp_attr = lck_grp_attr_alloc_init();
	inp_lck_grp = lck_grp_alloc_init("inp_lck_grp", inp_lck_grp_attr);
	inp_lck_attr = lck_attr_alloc_init();
	lck_mtx_init(&inp_fc_lck, inp_lck_grp, inp_lck_attr);
	RB_INIT(&inp_fc_tree);
	/* Round the element size up to a 64-bit multiple for alignment. */
	inp_fcezone_size = P2ROUNDUP(sizeof (struct inp_fc_entry),
	    sizeof (u_int64_t));
	inp_fcezone = zinit(inp_fcezone_size,
	    INP_FCEZONE_MAX * inp_fcezone_size, 0, INP_FCEZONE_NAME);
	if (inp_fcezone == NULL) {
		panic("%s: failed allocating %s", __func__,
		    INP_FCEZONE_NAME);
	}
	zone_change(inp_fcezone, Z_EXPAND, TRUE);
	zone_change(inp_fcezone, Z_CALLERACCT, FALSE);
}
/*
 * Allocate an inpcb for a socket and link it onto the pcbinfo's global
 * pcb list.  The pcb either comes fresh from the pcbinfo zone or, for
 * "cached in socket layer" sockets, is recycled from so->so_saved_pcb.
 * Returns 0 on success, ENOBUFS if zone allocation fails, or the MAC
 * label error when CONFIG_MACF_NET is enabled.
 */
int
in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, __unused struct proc *p)
{
	struct inpcb *inp;
	caddr_t temp;
#if IPSEC
#ifndef __APPLE__
	int error;
#endif
#endif
#if CONFIG_MACF_NET
	int mac_error;
#endif
	if (so->cached_in_sock_layer == 0) {
#if TEMPDEBUG
		printf("PCBALLOC calling zalloc for socket %x\n", so);
#endif
		inp = (struct inpcb *) zalloc(pcbinfo->ipi_zone);
		if (inp == NULL)
			return (ENOBUFS);
		bzero((caddr_t)inp, sizeof(*inp));
	}
	else {
#if TEMPDEBUG
		printf("PCBALLOC reusing PCB for socket %x\n", so);
#endif
		/*
		 * Reuse the pcb saved on the socket; preserve its saved
		 * protocol pcb pointer across the bzero.
		 */
		inp = (struct inpcb *)(void *)so->so_saved_pcb;
		temp = inp->inp_saved_ppcb;
		bzero((caddr_t) inp, sizeof(*inp));
		inp->inp_saved_ppcb = temp;
	}
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	inp->inp_pcbinfo = pcbinfo;
	inp->inp_socket = so;
#if CONFIG_MACF_NET
	mac_error = mac_inpcb_label_init(inp, M_WAITOK);
	if (mac_error != 0) {
		/* Cached pcbs stay attached to the socket; don't free. */
		if (so->cached_in_sock_layer == 0)
			zfree(pcbinfo->ipi_zone, inp);
		return (mac_error);
	}
	mac_inpcb_label_associate(so, inp);
#endif
	/* Point inp_stat at a 64-bit aligned slot inside its store. */
	inp->inp_stat = (struct inp_stat*)P2ROUNDUP(inp->inp_stat_store, sizeof(u_int64_t));
	if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store)
	    + sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) {
		panic("insufficient space to align inp_stat");
	}
	so->so_pcb = (caddr_t)inp;
	if (so->so_proto->pr_flags & PR_PCBLOCK) {
		lck_mtx_init(&inp->inpcb_mtx, pcbinfo->mtx_grp, pcbinfo->mtx_attr);
	}
#if IPSEC
#ifndef __APPLE__
	if (ipsec_bypass == 0) {
		error = ipsec_init_policy(so, &inp->inp_sp);
		if (error != 0) {
			zfree(pcbinfo->ipi_zone, inp);
			return error;
		}
	}
#endif
#endif
#if INET6
	if (INP_SOCKAF(so) == AF_INET6 && !ip6_mapped_addr_on)
		inp->inp_flags |= IN6P_IPV6_V6ONLY;
#endif
#if INET6
	if (ip6_auto_flowlabel)
		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
#endif
	/* Insert into the global pcb list under the pcbinfo write lock. */
	lck_rw_lock_exclusive(pcbinfo->mtx);
	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
	LIST_INSERT_HEAD(pcbinfo->listhead, inp, inp_list);
	pcbinfo->ipi_count++;
	lck_rw_done(pcbinfo->mtx);
	return (0);
}
/*
 * Wrapper around in_pcblookup_local() that additionally garbage-collects
 * a matched pcb which is already marked WNT_STOPUSING: if its socket has
 * no remaining use count, the pcb is detached/disposed here and NULL is
 * returned instead.  Callers hold the pcbinfo lock exclusively (disposal
 * removes the pcb from the global lists; see in_pcbdispose's assert).
 */
struct inpcb*
in_pcblookup_local_and_cleanup(
	struct inpcbinfo *pcbinfo,
	struct in_addr laddr,
	u_int lport_arg,
	int wild_okay)
{
	struct inpcb *inp;

	inp = in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay);
	if (inp && inp->inp_wantcnt == WNT_STOPUSING) {
		struct socket *so = inp->inp_socket;
		lck_mtx_lock(&inp->inpcb_mtx);
		if (so->so_usecount == 0) {
			if (inp->inp_state != INPCB_STATE_DEAD)
				in_pcbdetach(inp);
			/* dispose drops/destroys inpcb_mtx */
			in_pcbdispose(inp);
			inp = NULL;
		}
		else {
			lck_mtx_unlock(&inp->inpcb_mtx);
		}
	}
	return inp;
}
#ifdef __APPLE_API_PRIVATE
/*
 * Post a KEV_INET_PORTINUSE kernel event carrying the conflicting port
 * (converted to host order) and the current process's pid, so that
 * user-space listeners can learn about a bind conflict.  Callers drop
 * the pcbinfo lock before invoking this.
 */
static void
in_pcb_conflict_post_msg(u_int16_t port)
{
	struct kev_msg ev_msg;
	struct kev_in_portinuse in_portinuse;

	bzero(&in_portinuse, sizeof(struct kev_in_portinuse));
	bzero(&ev_msg, sizeof(struct kev_msg));
	in_portinuse.port = ntohs(port);	/* port in host order */
	in_portinuse.req_pid = proc_selfpid();
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_INET_SUBCLASS;
	ev_msg.event_code = KEV_INET_PORTINUSE;
	ev_msg.dv[0].data_ptr = &in_portinuse;
	ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse);
	ev_msg.dv[1].data_length = 0;
	kev_post_msg(&ev_msg);
}
#endif
/*
 * Bind an inpcb to a local address and/or port.
 *
 * If nam is non-NULL it supplies the requested address/port; a port of
 * zero (or nam == NULL) means "auto-assign an ephemeral port".  The
 * routine enforces reserved-port privilege, SO_REUSEADDR/SO_REUSEPORT
 * conflict rules and the per-uid SOF_REUSESHAREUID policy, then inserts
 * the pcb into the port/address hash.
 *
 * Locking: the pcbinfo lock must be taken before the socket lock, so
 * the socket lock is dropped and re-taken around the critical section.
 *
 * Returns 0 on success, or EADDRNOTAVAIL / EINVAL / EACCES /
 * EADDRINUSE / EAGAIN.
 */
int
in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p)
{
	struct socket *so = inp->inp_socket;
	unsigned short *lastport;
	struct sockaddr_in *sin;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0, rand_port = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
	int error, randomport, conflict = 0;
	kauth_cred_t cred;

	if (TAILQ_EMPTY(&in_ifaddrhead))	/* no interfaces configured */
		return (EADDRNOTAVAIL);
	if (inp->inp_lport || inp->inp_laddr.s_addr != INADDR_ANY)
		return (EINVAL);		/* already bound */
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = 1;
	/* pcbinfo lock ranks above the socket lock */
	socket_unlock(so, 0);
	lck_rw_lock_exclusive(pcbinfo->mtx);
	if (nam) {
		struct ifnet *outif = NULL;
		sin = (struct sockaddr_in *)(void *)nam;
		if (nam->sa_len != sizeof (*sin)) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return (EINVAL);
		}
#ifdef notdef
		/* family check disabled: old programs leave it unset */
		if (sin->sin_family != AF_INET) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return (EAFNOSUPPORT);
		}
#endif
		lport = sin->sin_port;
		if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
			/*
			 * For multicast binds, SO_REUSEADDR is treated
			 * like SO_REUSEPORT too.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (sin->sin_addr.s_addr != INADDR_ANY) {
			struct ifaddr *ifa;
			/* zero the port so ifa_ifwithaddr matches on address */
			sin->sin_port = 0;
			if ((ifa = ifa_ifwithaddr((struct sockaddr *)sin)) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return (EADDRNOTAVAIL);
			}
			else {
				IFA_LOCK(ifa);
				outif = ifa->ifa_ifp;
				IFA_UNLOCK(ifa);
				IFA_REMREF(ifa);
			}
		}
		if (lport) {
			struct inpcb *t;
#if !CONFIG_EMBEDDED
			/* binding below IPPORT_RESERVED requires privilege */
			if (ntohs(lport) < IPPORT_RESERVED) {
				cred = kauth_cred_proc_ref(p);
				error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
				kauth_cred_unref(&cred);
				if (error != 0) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EACCES);
				}
			}
#endif
			/*
			 * A non-root uid may not take over a port bound by
			 * a different uid unless the current binder opted
			 * in with SOF_REUSESHAREUID.
			 */
			if (kauth_cred_getuid(so->so_cred) &&
			    !IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				t = in_pcblookup_local_and_cleanup(inp->inp_pcbinfo,
				    sin->sin_addr, lport, INPLOOKUP_WILDCARD);
				if (t &&
				    (ntohl(sin->sin_addr.s_addr) != INADDR_ANY ||
				    ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
				    (t->inp_socket->so_options &
				    SO_REUSEPORT) == 0) &&
				    (kauth_cred_getuid(so->so_cred) !=
				    kauth_cred_getuid(t->inp_socket->so_cred)) &&
				    ((t->inp_socket->so_flags & SOF_REUSESHAREUID) == 0) &&
				    (ntohl(sin->sin_addr.s_addr) != INADDR_ANY ||
				    ntohl(t->inp_laddr.s_addr) != INADDR_ANY))
				{
#ifdef __APPLE_API_PRIVATE
					if ((t->inp_socket->so_flags & SOF_NOTIFYCONFLICT) && ((so->so_flags & SOF_NOTIFYCONFLICT) == 0))
						conflict = 1;
					lck_rw_done(pcbinfo->mtx);
					/* post the event only after dropping the lock */
					if (conflict)
						in_pcb_conflict_post_msg(lport);
#else
					lck_rw_done(pcbinfo->mtx);
#endif
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
			}
			/* ordinary SO_REUSEPORT conflict check */
			t = in_pcblookup_local_and_cleanup(pcbinfo, sin->sin_addr,
			    lport, wild);
			if (t &&
			    (reuseport & t->inp_socket->so_options) == 0) {
#if INET6
				if (ntohl(sin->sin_addr.s_addr) !=
				    INADDR_ANY ||
				    ntohl(t->inp_laddr.s_addr) !=
				    INADDR_ANY ||
				    INP_SOCKAF(so) != AF_INET6 ||
				    INP_SOCKAF(t->inp_socket) != AF_INET6)
#endif
				{
#ifdef __APPLE_API_PRIVATE
					if ((t->inp_socket->so_flags & SOF_NOTIFYCONFLICT) && ((so->so_flags & SOF_NOTIFYCONFLICT) == 0))
						conflict = 1;
					lck_rw_done(pcbinfo->mtx);
					if (conflict)
						in_pcb_conflict_post_msg(lport);
#else
					lck_rw_done(pcbinfo->mtx);
#endif
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
			}
		}
		inp->inp_laddr = sin->sin_addr;
		inp->inp_last_outifp = outif;
	}
	if (lport == 0) {
		/* Auto-assign a port from the appropriate range. */
		u_short first, last;
		int count;

		randomport = (so->so_flags & SOF_BINDRANDOMPORT) ||
		    (so->so_type == SOCK_STREAM ? tcp_use_randomport : udp_use_randomport);
		inp->inp_flags |= INP_ANONPORT;
		if (inp->inp_flags & INP_HIGHPORT) {
			first = ipport_hifirstauto;
			last = ipport_hilastauto;
			lastport = &pcbinfo->lasthi;
		} else if (inp->inp_flags & INP_LOWPORT) {
			/* low (reserved) ports require privilege */
			cred = kauth_cred_proc_ref(p);
			error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
			kauth_cred_unref(&cred);
			if (error != 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return error;
			}
			first = ipport_lowfirstauto;
			last = ipport_lowlastauto;
			lastport = &pcbinfo->lastlow;
		} else {
			first = ipport_firstauto;
			last = ipport_lastauto;
			lastport = &pcbinfo->lastport;
		}
		/* no point randomizing a single-port range */
		if (first == last)
			randomport = 0;
		if (first > last) {
			/* range counts downward */
			if (randomport) {
				read_random(&rand_port, sizeof(rand_port));
				*lastport = first - (rand_port % (first - last));
			}
			count = first - last;
			do {
				if (count-- < 0) {
					/* entire range exhausted: undo and fail */
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					inp->inp_laddr.s_addr = INADDR_ANY;
					inp->inp_last_outifp = NULL;
					return (EADDRNOTAVAIL);
				}
				--*lastport;
				if (*lastport > first || *lastport < last)
					*lastport = first;	/* wrap around */
				lport = htons(*lastport);
			} while (in_pcblookup_local_and_cleanup(pcbinfo,
			    inp->inp_laddr, lport, wild));
		} else {
			/* range counts upward */
			if (randomport) {
				read_random(&rand_port, sizeof(rand_port));
				*lastport = first + (rand_port % (first - last));
			}
			count = last - first;
			do {
				if (count-- < 0) {
					/* entire range exhausted: undo and fail */
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					inp->inp_laddr.s_addr = INADDR_ANY;
					inp->inp_last_outifp = NULL;
					return (EADDRNOTAVAIL);
				}
				++*lastport;
				if (*lastport < first || *lastport > last)
					*lastport = first;	/* wrap around */
				lport = htons(*lastport);
			} while (in_pcblookup_local_and_cleanup(pcbinfo,
			    inp->inp_laddr, lport, wild));
		}
	}
	socket_lock(so, 0);
	inp->inp_lport = lport;
	if (in_pcbinshash(inp, 1) != 0) {
		/* hash insertion failed; revert all binding state */
		inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		inp->inp_last_outifp = NULL;
		lck_rw_done(pcbinfo->mtx);
		return (EAGAIN);
	}
	lck_rw_done(pcbinfo->mtx);
	sflt_notify(so, sock_evt_bound, NULL);
	return (0);
}
/*
 * Choose the local address (and outgoing interface) to use when
 * connecting to the destination in nam.  The chosen address is stored
 * in *plocal_sin and, if outif is non-NULL, the interface in *outif.
 *
 * Side effects: may rewrite sin->sin_addr (INADDR_ANY / broadcast
 * substitution) and may allocate or replace the pcb's cached route.
 * Honors INP_BOUND_IF scoping and INP_NO_IFT_CELLULAR restrictions,
 * posting SO_FILT_HINT_IFDENIED when a cellular route is rejected.
 *
 * Returns 0 on success or EINVAL / EAFNOSUPPORT / EADDRNOTAVAIL.
 */
int
in_pcbladdr(struct inpcb *inp, struct sockaddr *nam,
    struct sockaddr_in *plocal_sin, struct ifnet **outif)
{
	struct in_ifaddr *ia;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;

	if (nam->sa_len != sizeof (*sin))
		return (EINVAL);
	if (sin->sin_family != AF_INET)
		return (EAFNOSUPPORT);
	if (sin->sin_port == 0)
		return (EADDRNOTAVAIL);

	/*
	 * Substitute the primary interface's address for INADDR_ANY,
	 * or its broadcast address for INADDR_BROADCAST.
	 */
	lck_rw_lock_shared(in_ifaddr_rwlock);
	if (!TAILQ_EMPTY(&in_ifaddrhead)) {
		ia = TAILQ_FIRST(&in_ifaddrhead);
		IFA_LOCK_SPIN(&ia->ia_ifa);
		if (sin->sin_addr.s_addr == INADDR_ANY)
			sin->sin_addr = IA_SIN(ia)->sin_addr;
		else if (sin->sin_addr.s_addr == (u_int32_t)INADDR_BROADCAST &&
		    (ia->ia_ifp->if_flags & IFF_BROADCAST))
			sin->sin_addr = satosin(&ia->ia_broadaddr)->sin_addr;
		IFA_UNLOCK(&ia->ia_ifa);
		ia = NULL;
	}
	lck_rw_done(in_ifaddr_rwlock);

	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		struct route *ro;
		unsigned int ifscope = IFSCOPE_NONE;
		unsigned int nocell;

		ia = (struct in_ifaddr *)0;
		/* explicit interface beats a bound-interface scope */
		if (outif != NULL && *outif != NULL)
			ifscope = (*outif)->if_index;
		else if (inp->inp_flags & INP_BOUND_IF)
			ifscope = inp->inp_boundifp->if_index;
		nocell = (inp->inp_flags & INP_NO_IFT_CELLULAR) ? 1 : 0;
		ro = &inp->inp_route;
		if (ro->ro_rt != NULL)
			RT_LOCK_SPIN(ro->ro_rt);
		/*
		 * Drop a cached route that no longer fits: wrong family,
		 * different destination, SO_DONTROUTE, or a stale
		 * generation id after a routing-table change.
		 */
		if (ro->ro_rt && (ro->ro_dst.sa_family != AF_INET ||
		    satosin(&ro->ro_dst)->sin_addr.s_addr !=
		    sin->sin_addr.s_addr ||
		    inp->inp_socket->so_options & SO_DONTROUTE ||
		    ro->ro_rt->generation_id != route_generation)) {
			RT_UNLOCK(ro->ro_rt);
			rtfree(ro->ro_rt);
			ro->ro_rt = NULL;
		}
		/* allocate (or re-allocate) a route to the destination */
		if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0 &&
		    (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) {
			if (ro->ro_rt != NULL)
				RT_UNLOCK(ro->ro_rt);
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in));
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(struct sockaddr_in);
			((struct sockaddr_in *)(void *)&ro->ro_dst)->sin_addr =
			    sin->sin_addr;
			rtalloc_scoped(ro, ifscope);
			if (ro->ro_rt != NULL)
				RT_LOCK_SPIN(ro->ro_rt);
		}
		/* reject routes over cellular when the pcb forbids it */
		if (nocell && ro->ro_rt != NULL) {
			RT_LOCK_ASSERT_HELD(ro->ro_rt);
			if (ro->ro_rt->rt_ifp->if_type == IFT_CELLULAR) {
				RT_UNLOCK(ro->ro_rt);
				rtfree(ro->ro_rt);
				ro->ro_rt = NULL;
				soevent(inp->inp_socket,
				    (SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_IFDENIED));
			}
		}
		/* take the route's interface address (unless loopback) */
		if (ro->ro_rt != NULL) {
			RT_CONVERT_LOCK(ro->ro_rt);
			if (!(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) {
				ia = ifatoia(ro->ro_rt->rt_ifa);
				if (ia) {
					IFA_ADDREF(&ia->ia_ifa);
				}
			}
			RT_UNLOCK(ro->ro_rt);
		}
		/*
		 * No usable route: fall back to a point-to-point dest
		 * match, then a same-net interface, then the first
		 * configured interface.
		 */
		if (ia == 0) {
			u_short fport = sin->sin_port;
			/* zero the port so address-based lookup matches */
			sin->sin_port = 0;
			ia = ifatoia(ifa_ifwithdstaddr(sintosa(sin)));
			if (ia == 0) {
				ia = ifatoia(ifa_ifwithnet_scoped(sintosa(sin),
				    ifscope));
			}
			sin->sin_port = fport;
			if (ia == 0) {
				lck_rw_lock_shared(in_ifaddr_rwlock);
				ia = TAILQ_FIRST(&in_ifaddrhead);
				if (ia)
					IFA_ADDREF(&ia->ia_ifa);
				lck_rw_done(in_ifaddr_rwlock);
			}
			/* the fallback must also honor the cellular ban */
			if (nocell && ia != NULL &&
			    ia->ia_ifa.ifa_ifp->if_type == IFT_CELLULAR) {
				IFA_REMREF(&ia->ia_ifa);
				ia = NULL;
				soevent(inp->inp_socket,
				    (SO_FILT_HINT_LOCKED |
				    SO_FILT_HINT_IFDENIED));
			}
			if (ia == 0)
				return (EADDRNOTAVAIL);
		}
		/*
		 * For multicast with an explicit outgoing interface set
		 * via IP_MULTICAST_IF, use an address of that interface.
		 */
		if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) &&
		    inp->inp_moptions != NULL) {
			struct ip_moptions *imo;
			struct ifnet *ifp;
			imo = inp->inp_moptions;
			IMO_LOCK(imo);
			if (imo->imo_multicast_ifp != NULL && (ia == NULL ||
			    ia->ia_ifp != imo->imo_multicast_ifp)) {
				ifp = imo->imo_multicast_ifp;
				if (ia)
					IFA_REMREF(&ia->ia_ifa);
				lck_rw_lock_shared(in_ifaddr_rwlock);
				TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
					if (ia->ia_ifp == ifp)
						break;
				}
				if (ia)
					IFA_ADDREF(&ia->ia_ifa);
				lck_rw_done(in_ifaddr_rwlock);
				if (ia == 0) {
					IMO_UNLOCK(imo);
					return (EADDRNOTAVAIL);
				}
			}
			IMO_UNLOCK(imo);
		}
		/* report the chosen address/interface; drop our ref */
		IFA_LOCK_SPIN(&ia->ia_ifa);
		*plocal_sin = ia->ia_addr;
		if (outif != NULL)
			*outif = ia->ia_ifp;
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
	}
	return(0);
}
/*
 * Connect an inpcb to the foreign address/port in nam.  Picks a local
 * address via in_pcbladdr(), verifies the resulting 4-tuple is unique,
 * binds an ephemeral port if needed, then records the foreign endpoint
 * and rehashes the pcb.
 *
 * Returns 0 on success, or an error from in_pcbladdr()/in_pcbbind(),
 * EADDRINUSE for a duplicate 4-tuple, or ECONNREFUSED if the socket
 * was aborted while its lock was dropped for the lookup.
 */
int
in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p,
    struct ifnet **outif)
{
	struct sockaddr_in ifaddr;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
	struct inpcb *pcb;
	int error;

	/* Choose the local address/interface for this destination. */
	if ((error = in_pcbladdr(inp, nam, &ifaddr, outif)) != 0)
		return (error);

	/*
	 * Look for an existing pcb with the same 4-tuple.  The socket
	 * lock is dropped around the lookup, so the socket may have
	 * been aborted by the time we re-acquire it.
	 */
	socket_unlock(inp->inp_socket, 0);
	pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port,
	    inp->inp_laddr.s_addr ? inp->inp_laddr : ifaddr.sin_addr,
	    inp->inp_lport, 0, NULL);
	socket_lock(inp->inp_socket, 0);
	if ((inp->inp_socket->so_flags & SOF_ABORTED) != 0) {
		/*
		 * Release the WNT_ACQUIRE reference taken on the found
		 * pcb by in_pcblookup_hash() before bailing out;
		 * returning without it leaked the wantcnt and kept the
		 * found pcb from ever reaching WNT_STOPUSING.
		 */
		if (pcb != NULL)
			in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return (ECONNREFUSED);
	}
	if (pcb != NULL) {
		/* 4-tuple already in use; drop the lookup reference. */
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return (EADDRINUSE);
	}
	if (inp->inp_laddr.s_addr == INADDR_ANY) {
		if (inp->inp_lport == 0) {
			/* implicit bind to an ephemeral port */
			error = in_pcbbind(inp, (struct sockaddr *)0, p);
			if (error)
				return (error);
		}
		/*
		 * Lock inversion: the pcbinfo lock ranks above the
		 * socket lock, so drop the socket lock if we must block.
		 */
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
			socket_lock(inp->inp_socket, 0);
		}
		inp->inp_laddr = ifaddr.sin_addr;
		inp->inp_last_outifp = (outif != NULL) ? *outif : NULL;
		inp->inp_flags |= INP_INADDR_ANY;
	}
	else {
		if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
			socket_lock(inp->inp_socket, 0);
		}
	}
	inp->inp_faddr = sin->sin_addr;
	inp->inp_fport = sin->sin_port;
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	return (0);
}
/*
 * Disconnect a pcb: clear its foreign address/port and rehash it back
 * into the wildcard bucket.  If the socket no longer has a file
 * descriptor reference (SS_NOFDREF), detach the pcb as well.
 */
void
in_pcbdisconnect(struct inpcb *inp)
{
	inp->inp_faddr.s_addr = INADDR_ANY;
	inp->inp_fport = 0;
	/* pcbinfo lock ranks above the socket lock */
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	if (inp->inp_socket->so_state & SS_NOFDREF)
		in_pcbdetach(inp);
}
/*
 * Detach a pcb from its socket: mark it STOPUSING/DEAD and, on the
 * first pass (SOF_PCBCLEARING not yet set), release its IP options,
 * cached route and multicast state, drop the socket's last reference
 * and flag the socket so the pcb is not cleared twice.
 */
void
in_pcbdetach(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	if (so->so_pcb == 0) {
		/* socket/pcb association already severed -- bug */
		panic("in_pcbdetach: inp=%p so=%p proto=%d so_pcb is null!\n",
		    inp, so, so->so_proto->pr_protocol);
	}
#if IPSEC
	if (ipsec_bypass == 0) {
		ipsec4_delete_pcbpolicy(inp);
	}
#endif
	/* mark the pcb unusable by new lookups */
	if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING)
		panic("in_pcbdetach so=%p prot=%x couldn't set to STOPUSING\n", so, so->so_proto->pr_protocol);
#if TEMPDEBUG
	if (so->cached_in_sock_layer)
		printf("in_pcbdetach for cached socket %x flags=%x\n", so, so->so_flags);
	else
		printf("in_pcbdetach for allocated socket %x flags=%x\n", so, so->so_flags);
#endif
	if ((so->so_flags & SOF_PCBCLEARING) == 0) {
		struct rtentry *rt;
		struct ip_moptions *imo;

		inp->inp_vflag = 0;
		if (inp->inp_options)
			(void)m_free(inp->inp_options);
		if ((rt = inp->inp_route.ro_rt) != NULL) {
			inp->inp_route.ro_rt = NULL;
			rtfree(rt);
		}
		imo = inp->inp_moptions;
		inp->inp_moptions = NULL;
		if (imo != NULL)
			IMO_REMREF(imo);
		sofreelastref(so, 0);
		inp->inp_state = INPCB_STATE_DEAD;
		/* makes sure we don't detach twice */
		so->so_flags |= SOF_PCBCLEARING;
	}
}
/*
 * Final disposal of a dead pcb: unlink it from all pcbinfo lists,
 * release socket buffers, destroy the per-pcb mutex, sever the
 * socket<->pcb association and free both (the pcb goes back to the
 * zone unless it is cached in the socket layer).  The caller must hold
 * the pcbinfo lock exclusively and the socket must have no users left.
 */
void
in_pcbdispose(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;
	struct inpcbinfo *ipi = inp->inp_pcbinfo;
#if TEMPDEBUG
	if (inp->inp_state != INPCB_STATE_DEAD) {
		printf("in_pcbdispose: not dead yet? so=%p\n", so);
	}
#endif
	if (so && so->so_usecount != 0)
		panic("%s: so %p so_usecount %d so_lockhistory %s\n",
		    __func__, so, so->so_usecount,
		    (so != NULL) ? solockhistory_nr(so) : "--");
	lck_rw_assert(ipi->mtx, LCK_RW_ASSERT_EXCLUSIVE);
	inp->inp_gencnt = ++ipi->ipi_gencnt;
	/* remove from hash/port/global lists and flow-control tree */
	in_pcbremlists(inp);
	if (so) {
		if (so->so_proto->pr_flags & PR_PCBLOCK) {
			sofreelastref(so, 0);
			if (so->so_rcv.sb_cc || so->so_snd.sb_cc) {
#if TEMPDEBUG
				printf("in_pcbdispose sb not cleaned up so=%p rc_cci=%x snd_cc=%x\n",
				    so, so->so_rcv.sb_cc, so->so_snd.sb_cc);
#endif
				/* release any leftover socket buffer memory */
				sbrelease(&so->so_rcv);
				sbrelease(&so->so_snd);
			}
			if (so->so_head != NULL)
				panic("in_pcbdispose, so=%p head still exist\n", so);
			lck_mtx_unlock(&inp->inpcb_mtx);
			lck_mtx_destroy(&inp->inpcb_mtx, ipi->mtx_grp);
		}
		so->so_flags |= SOF_PCBCLEARING;
		/* keep the pcb reachable for reuse by cached sockets */
		so->so_saved_pcb = (caddr_t) inp;
		so->so_pcb = 0;
		inp->inp_socket = 0;
#if CONFIG_MACF_NET
		mac_inpcb_label_destroy(inp);
#endif
		if (inp->inp_route.ro_rt != NULL) {
			rtfree(inp->inp_route.ro_rt);
			inp->inp_route.ro_rt = NULL;
		}
		if (so->cached_in_sock_layer == 0) {
			zfree(ipi->ipi_zone, inp);
		}
		sodealloc(so);
	}
#if TEMPDEBUG
	else
		printf("in_pcbdispose: no socket for inp=%p\n", inp);
#endif
}
/*
 * Return the socket's local IPv4 address/port as a freshly allocated
 * sockaddr_in through *nam.  The caller owns (and must FREE) the
 * returned sockaddr.  Returns ENOBUFS on allocation failure or
 * ECONNRESET if the socket no longer has a pcb.
 */
int
in_setsockaddr(struct socket *so, struct sockaddr **nam)
{
	struct sockaddr_in *addr;
	struct inpcb *pcb;

	MALLOC(addr, struct sockaddr_in *, sizeof (*addr), M_SONAME, M_WAITOK);
	if (addr == NULL)
		return (ENOBUFS);
	bzero(addr, sizeof (*addr));
	addr->sin_family = AF_INET;
	addr->sin_len = sizeof (*addr);

	pcb = sotoinpcb(so);
	if (pcb == NULL) {
		/* pcb already torn down -- nothing to report */
		FREE(addr, M_SONAME);
		return (ECONNRESET);
	}
	addr->sin_port = pcb->inp_lport;
	addr->sin_addr = pcb->inp_laddr;

	*nam = (struct sockaddr *)addr;
	return (0);
}
int
in_setpeeraddr(struct socket *so, struct sockaddr **nam)
{
struct inpcb *inp;
struct sockaddr_in *sin;
MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK);
if (sin == NULL)
return ENOBUFS;
bzero((caddr_t)sin, sizeof (*sin));
sin->sin_family = AF_INET;
sin->sin_len = sizeof(*sin);
inp = sotoinpcb(so);
if (!inp) {
FREE(sin, M_SONAME);
return ECONNRESET;
}
sin->sin_port = inp->inp_fport;
sin->sin_addr = inp->inp_faddr;
*nam = (struct sockaddr *)sin;
return 0;
}
/*
 * Invoke notify(inp, errno) on every IPv4 pcb whose foreign address
 * equals faddr.  Pcbs with no socket or already marked STOPUSING are
 * skipped; each callback runs with the pcb's socket locked.
 */
void
in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr,
    int errno, void (*notify)(struct inpcb *, int))
{
	struct inpcb *inp;

	lck_rw_lock_shared(pcbinfo->mtx);
	LIST_FOREACH(inp, pcbinfo->listhead, inp_list) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_faddr.s_addr != faddr.s_addr ||
		    inp->inp_socket == NULL)
			continue;
		/* hold the pcb across the callback */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
			continue;
		socket_lock(inp->inp_socket, 1);
		(*notify)(inp, errno);
		(void)in_pcb_checkstate(inp, WNT_RELEASE, 1);
		socket_unlock(inp->inp_socket, 1);
	}
	lck_rw_done(pcbinfo->mtx);
}
/*
 * Called when the cached route for this pcb appears to have failed:
 * report it via an RTM_LOSING routing message, delete it if it was
 * dynamically created (RTF_DYNAMIC), and drop our cached reference so
 * a fresh route is allocated on the next output -- but only if the
 * pcb's local address is still valid on some interface.
 */
void
in_losing(struct inpcb *inp)
{
	struct rtentry *rt;
	struct rt_addrinfo info;

	if ((rt = inp->inp_route.ro_rt) != NULL) {
		struct in_ifaddr *ia;

		bzero((caddr_t)&info, sizeof(info));
		RT_LOCK(rt);
		info.rti_info[RTAX_DST] =
		    (struct sockaddr *)&inp->inp_route.ro_dst;
		info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		info.rti_info[RTAX_NETMASK] = rt_mask(rt);
		rt_missmsg(RTM_LOSING, &info, rt->rt_flags, 0);
		if (rt->rt_flags & RTF_DYNAMIC) {
			/* prevent reuse while the delete is in flight */
			rt->rt_flags |= RTF_CONDEMNED;
			RT_UNLOCK(rt);
			(void) rtrequest(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags,
			    (struct rtentry **)0);
		} else {
			RT_UNLOCK(rt);
		}
		/* only drop the cached route if laddr is still ours */
		if ((ia = ifa_foraddr(inp->inp_laddr.s_addr)) != NULL) {
			inp->inp_route.ro_rt = NULL;
			rtfree(rt);
			IFA_REMREF(&ia->ia_ifa);
		}
	}
}
/*
 * After a routing change, discard the pcb's cached route so a better
 * one is allocated on the next output -- but only if the pcb's local
 * address is still assigned to some interface.
 */
void
in_rtchange(struct inpcb *inp, __unused int errno)
{
	struct rtentry *rt;

	if ((rt = inp->inp_route.ro_rt) != NULL) {
		struct in_ifaddr *ia;

		if ((ia = ifa_foraddr(inp->inp_laddr.s_addr)) == NULL) {
			/* address no longer valid; keep the route as-is */
			return;
		}
		IFA_REMREF(&ia->ia_ifa);
		rtfree(rt);
		inp->inp_route.ro_rt = NULL;
	}
}
/*
 * Look up a pcb bound to (laddr, lport).
 *
 * With wild_okay == 0 only exact matches in the address hash are
 * considered; otherwise the port hash is scanned and the pcb with the
 * fewest wildcard mismatches (foreign addr set, local addr mismatch)
 * wins.  Caller holds the pcbinfo lock.  Returns the matching pcb or
 * NULL; no reference is taken on the result.
 */
struct inpcb *
in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
    unsigned int lport_arg, int wild_okay)
{
	struct inpcb *inp;
	int matchwild = 3, wildcard;
	u_short lport = lport_arg;

	KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0,0,0,0,0);
	if (!wild_okay) {
		struct inpcbhead *head;
		/*
		 * Exact-match only: scan the hash bucket for unconnected
		 * pcbs bound to exactly (laddr, lport).
		 */
		head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->hashmask)];
		LIST_FOREACH(inp, head, inp_hash) {
#if INET6
			if ((inp->inp_vflag & INP_IPV4) == 0)
				continue;
#endif
			if (inp->inp_faddr.s_addr == INADDR_ANY &&
			    inp->inp_laddr.s_addr == laddr.s_addr &&
			    inp->inp_lport == lport) {
				/*
				 * Found.  Emit the END trace before the
				 * early return so the DBG_FUNC_START
				 * event above is always balanced (it
				 * previously was not on this path).
				 */
				KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END,
				    inp, 0, 0, 0, 0);
				return (inp);
			}
		}
		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0,0,0,0,0);
		return (NULL);
	} else {
		struct inpcbporthead *porthash;
		struct inpcbport *phd;
		struct inpcb *match = NULL;
		/*
		 * Best-fit search: walk all pcbs sharing this port and
		 * keep the one with the fewest wildcard mismatches.
		 */
		porthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(lport,
		    pcbinfo->porthashmask)];
		LIST_FOREACH(phd, porthash, phd_hash) {
			if (phd->phd_port == lport)
				break;
		}
		if (phd != NULL) {
			LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
				wildcard = 0;
#if INET6
				if ((inp->inp_vflag & INP_IPV4) == 0)
					continue;
#endif
				if (inp->inp_faddr.s_addr != INADDR_ANY)
					wildcard++;
				if (inp->inp_laddr.s_addr != INADDR_ANY) {
					if (laddr.s_addr == INADDR_ANY)
						wildcard++;
					else if (inp->inp_laddr.s_addr != laddr.s_addr)
						continue;	/* different address: no match */
				} else {
					if (laddr.s_addr != INADDR_ANY)
						wildcard++;
				}
				if (wildcard < matchwild) {
					match = inp;
					matchwild = wildcard;
					if (matchwild == 0) {
						/* exact match; can't do better */
						break;
					}
				}
			}
		}
		KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,0,0,0,0);
		return (match);
	}
}
/*
 * Check whether a pcb matching the given 4-tuple exists, optionally
 * falling back to wildcard (unconnected listener) matches.  On a match
 * with a live socket, report the owner's uid/gid through *uid / *gid.
 * Takes the pcbinfo lock shared for the duration.  Returns 1 when a
 * matching pcb with a socket was found, 0 otherwise.
 */
int
in_pcblookup_hash_exists(
	struct inpcbinfo *pcbinfo,
	struct in_addr faddr,
	u_int fport_arg,
	struct in_addr laddr,
	u_int lport_arg,
	int wildcard,
	uid_t *uid,
	gid_t *gid,
	struct ifnet *ifp)
{
	struct inpcbhead *head;
	struct inpcb *inp;
	u_short fport = fport_arg, lport = lport_arg;
	int found;

	*uid = UID_MAX;
	*gid = GID_MAX;
	lck_rw_lock_shared(pcbinfo->mtx);
	/* First pass: exact 4-tuple match. */
	head = &pcbinfo->hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
	    pcbinfo->hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		/* honor receive-restricted interfaces */
		if (ip_restrictrecvif && ifp != NULL &&
		    (ifp->if_eflags & IFEF_RESTRICTED_RECV) &&
		    !(inp->inp_flags & INP_RECV_ANYIF))
			continue;
		if (inp->inp_faddr.s_addr == faddr.s_addr &&
		    inp->inp_laddr.s_addr == laddr.s_addr &&
		    inp->inp_fport == fport &&
		    inp->inp_lport == lport) {
			if ((found = (inp->inp_socket != NULL))) {
				*uid = kauth_cred_getuid(
				    inp->inp_socket->so_cred);
				*gid = kauth_cred_getgid(
				    inp->inp_socket->so_cred);
			}
			lck_rw_done(pcbinfo->mtx);
			return (found);
		}
	}
	if (wildcard) {
		/*
		 * Second pass: unconnected pcbs on this port.  A pcb
		 * bound to the specific laddr beats one bound to
		 * INADDR_ANY; native-IPv4 wildcards beat IPv6-mapped.
		 */
		struct inpcb *local_wild = NULL;
#if INET6
		struct inpcb *local_wild_mapped = NULL;
#endif
		head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
		    pcbinfo->hashmask)];
		LIST_FOREACH(inp, head, inp_hash) {
#if INET6
			if ((inp->inp_vflag & INP_IPV4) == 0)
				continue;
#endif
			if (ip_restrictrecvif && ifp != NULL &&
			    (ifp->if_eflags & IFEF_RESTRICTED_RECV) &&
			    !(inp->inp_flags & INP_RECV_ANYIF))
				continue;
			if (inp->inp_faddr.s_addr == INADDR_ANY &&
			    inp->inp_lport == lport) {
				if (inp->inp_laddr.s_addr == laddr.s_addr) {
					if ((found = (inp->inp_socket != NULL))) {
						*uid = kauth_cred_getuid(
						    inp->inp_socket->so_cred);
						*gid = kauth_cred_getgid(
						    inp->inp_socket->so_cred);
					}
					lck_rw_done(pcbinfo->mtx);
					return (found);
				}
				else if (inp->inp_laddr.s_addr == INADDR_ANY) {
#if INET6
					if (inp->inp_socket &&
					    INP_CHECK_SOCKAF(inp->inp_socket,
					    AF_INET6))
						local_wild_mapped = inp;
					else
#endif
						local_wild = inp;
				}
			}
		}
		if (local_wild == NULL) {
#if INET6
			if (local_wild_mapped != NULL) {
				if ((found = (local_wild_mapped->inp_socket != NULL))) {
					*uid = kauth_cred_getuid(
					    local_wild_mapped->inp_socket->so_cred);
					*gid = kauth_cred_getgid(
					    local_wild_mapped->inp_socket->so_cred);
				}
				lck_rw_done(pcbinfo->mtx);
				return (found);
			}
#endif
			lck_rw_done(pcbinfo->mtx);
			return (0);
		}
		if (local_wild != NULL) {
			if ((found = (local_wild->inp_socket != NULL))) {
				*uid = kauth_cred_getuid(
				    local_wild->inp_socket->so_cred);
				*gid = kauth_cred_getgid(
				    local_wild->inp_socket->so_cred);
			}
			lck_rw_done(pcbinfo->mtx);
			return (found);
		}
	}
	lck_rw_done(pcbinfo->mtx);
	return (0);
}
/*
 * Look up a pcb by 4-tuple, optionally falling back to wildcard
 * (unconnected listener) matches.  Takes the pcbinfo lock shared for
 * the scan.  On success the returned pcb carries a WNT_ACQUIRE
 * reference which the caller must release via in_pcb_checkstate
 * (WNT_RELEASE); returns NULL if no match or the match is STOPUSING.
 */
struct inpcb *
in_pcblookup_hash(
	struct inpcbinfo *pcbinfo,
	struct in_addr faddr,
	u_int fport_arg,
	struct in_addr laddr,
	u_int lport_arg,
	int wildcard,
	struct ifnet *ifp)
{
	struct inpcbhead *head;
	struct inpcb *inp;
	u_short fport = fport_arg, lport = lport_arg;

	/*
	 * We may have found the pcb in the last lookup - no need to lock it
	 * (the lock is shared for the scan only).
	 */
	lck_rw_lock_shared(pcbinfo->mtx);
	/* First pass: exact 4-tuple match. */
	head = &pcbinfo->hashbase[INP_PCBHASH(faddr.s_addr, lport, fport, pcbinfo->hashmask)];
	LIST_FOREACH(inp, head, inp_hash) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		/* honor receive-restricted interfaces */
		if (ip_restrictrecvif && ifp != NULL &&
		    (ifp->if_eflags & IFEF_RESTRICTED_RECV) &&
		    !(inp->inp_flags & INP_RECV_ANYIF))
			continue;
		if (inp->inp_faddr.s_addr == faddr.s_addr &&
		    inp->inp_laddr.s_addr == laddr.s_addr &&
		    inp->inp_fport == fport &&
		    inp->inp_lport == lport) {
			if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
				lck_rw_done(pcbinfo->mtx);
				return (inp);
			}
			else {
				/* pcb is dying; treat as no match */
				lck_rw_done(pcbinfo->mtx);
				return (NULL);
			}
		}
	}
	if (wildcard) {
		/*
		 * Second pass: unconnected pcbs on this port.  A pcb
		 * bound to the specific laddr beats one bound to
		 * INADDR_ANY; native-IPv4 wildcards beat IPv6-mapped.
		 */
		struct inpcb *local_wild = NULL;
#if INET6
		struct inpcb *local_wild_mapped = NULL;
#endif
		head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->hashmask)];
		LIST_FOREACH(inp, head, inp_hash) {
#if INET6
			if ((inp->inp_vflag & INP_IPV4) == 0)
				continue;
#endif
			if (ip_restrictrecvif && ifp != NULL &&
			    (ifp->if_eflags & IFEF_RESTRICTED_RECV) &&
			    !(inp->inp_flags & INP_RECV_ANYIF))
				continue;
			if (inp->inp_faddr.s_addr == INADDR_ANY &&
			    inp->inp_lport == lport) {
				if (inp->inp_laddr.s_addr == laddr.s_addr) {
					if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
						lck_rw_done(pcbinfo->mtx);
						return (inp);
					}
					else {
						lck_rw_done(pcbinfo->mtx);
						return (NULL);
					}
				}
				else if (inp->inp_laddr.s_addr == INADDR_ANY) {
#if INET6
					/*
					 * Guard against a NULL socket, as
					 * in_pcblookup_hash_exists() does;
					 * the pcb may be mid-teardown.
					 */
					if (inp->inp_socket != NULL &&
					    INP_CHECK_SOCKAF(inp->inp_socket,
					    AF_INET6))
						local_wild_mapped = inp;
					else
#endif
						local_wild = inp;
				}
			}
		}
		if (local_wild == NULL) {
#if INET6
			if (local_wild_mapped != NULL) {
				if (in_pcb_checkstate(local_wild_mapped, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
					lck_rw_done(pcbinfo->mtx);
					return (local_wild_mapped);
				}
				else {
					lck_rw_done(pcbinfo->mtx);
					return (NULL);
				}
			}
#endif
			lck_rw_done(pcbinfo->mtx);
			return (NULL);
		}
		if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
			lck_rw_done(pcbinfo->mtx);
			return (local_wild);
		}
		else {
			lck_rw_done(pcbinfo->mtx);
			return (NULL);
		}
	}
	/* not found */
	lck_rw_done(pcbinfo->mtx);
	return (NULL);
}
/*
 * Insert a pcb into the address hash and the per-port hash, creating
 * the port-head entry if this is the first pcb on the port.  When
 * "locked" is 0 the pcbinfo lock is acquired here (and released before
 * returning); the caller then holds only the socket lock.
 *
 * Returns 0 on success, ENOBUFS if the port head can't be allocated,
 * or ECONNABORTED if the pcb was marked dead.
 */
int
in_pcbinshash(struct inpcb *inp, int locked)
{
	struct inpcbhead *pcbhash;
	struct inpcbporthead *pcbporthash;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbport *phd;
	u_int32_t hashkey_faddr;

	if (!locked) {
		if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
			/*
			 * Lock inversion: the pcbinfo lock ranks above
			 * the socket lock, so drop the socket lock while
			 * blocking for the pcbinfo lock.
			 */
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(pcbinfo->mtx);
			socket_lock(inp->inp_socket, 0);
		}
		/*
		 * The pcb may have been marked dead before entry, or
		 * while the socket lock was dropped above.  Check it in
		 * both cases -- previously this was checked only when
		 * the try-lock failed, letting a dead pcb fall through
		 * to the VERIFY() below and panic.
		 */
		if (inp->inp_state == INPCB_STATE_DEAD) {
			lck_rw_done(pcbinfo->mtx);
			return(ECONNABORTED);
		}
	}
#if INET6
	if (inp->inp_vflag & INP_IPV6)
		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] ;
	else
#endif
		hashkey_faddr = inp->inp_faddr.s_addr;
	inp->hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->hashmask);
	pcbhash = &pcbinfo->hashbase[inp->hash_element];
	pcbporthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(inp->inp_lport,
	    pcbinfo->porthashmask)];
	/* Find (or create below) the head for this local port. */
	LIST_FOREACH(phd, pcbporthash, phd_hash) {
		if (phd->phd_port == inp->inp_lport)
			break;
	}
	VERIFY(inp->inp_state != INPCB_STATE_DEAD);
	if (phd == NULL) {
		MALLOC(phd, struct inpcbport *, sizeof(struct inpcbport), M_PCB, M_WAITOK);
		if (phd == NULL) {
			if (!locked)
				lck_rw_done(pcbinfo->mtx);
			return (ENOBUFS);
		}
		phd->phd_port = inp->inp_lport;
		LIST_INIT(&phd->phd_pcblist);
		LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
	}
	inp->inp_phd = phd;
	LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
	if (!locked)
		lck_rw_done(pcbinfo->mtx);
	return (0);
}
/*
 * Recompute a pcb's hash bucket after its address/port tuple changed
 * (connect/disconnect) and move it to the new bucket.  Caller holds
 * the pcbinfo lock exclusively.
 */
void
in_pcbrehash(struct inpcb *inp)
{
	struct inpcbhead *head;
	u_int32_t hashkey_faddr;
#if INET6
	/* for mapped addresses, hash on the low 32 bits (the v4 part) */
	if (inp->inp_vflag & INP_IPV6)
		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] ;
	else
#endif
		hashkey_faddr = inp->inp_faddr.s_addr;
	inp->hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport,
	    inp->inp_fport, inp->inp_pcbinfo->hashmask);
	head = &inp->inp_pcbinfo->hashbase[inp->hash_element];
	LIST_REMOVE(inp, inp_hash);
	LIST_INSERT_HEAD(head, inp, inp_hash);
}
/*
 * Unlink a pcb from every pcbinfo list: the address hash, the per-port
 * list (freeing the port head when it empties), the global pcb list,
 * and any flow-control tree entry keyed by its flow hash.  Caller
 * holds the pcbinfo lock exclusively.
 */
void
in_pcbremlists(struct inpcb *inp)
{
	struct inp_fc_entry *infce;

	inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt;
	if (inp->inp_lport) {
		struct inpcbport *phd = inp->inp_phd;
		LIST_REMOVE(inp, inp_hash);
		LIST_REMOVE(inp, inp_portlist);
		/* free the port head once its last pcb is gone */
		if (phd != NULL && (LIST_FIRST(&phd->phd_pcblist) == NULL)) {
			LIST_REMOVE(phd, phd_hash);
			FREE(phd, M_PCB);
		}
	}
	LIST_REMOVE(inp, inp_list);
	/* drop any pending flow-advisory entry for this flow */
	infce = inp_fc_getinp(inp->inp_flowhash);
	if (infce != NULL)
		inp_fc_entry_free(infce);
	inp->inp_pcbinfo->ipi_count--;
}
/*
 * Manipulate the "want" reference count on a PCB.
 *
 * Modes:
 *   WNT_STOPUSING -- mark the PCB dead and, if no references remain,
 *       latch the want count to the 0xffff sentinel so no further
 *       references can be taken.
 *   WNT_ACQUIRE   -- take a reference, unless the count is already at
 *       the sentinel (PCB is being torn down).
 *   WNT_RELEASE   -- drop a reference; if the PCB went dead meanwhile,
 *       fall through to the STOPUSING latch logic.
 *
 * "locked" is nonzero when the caller already holds the socket lock.
 * The count itself is updated lock-free via compare-and-swap; the
 * socket lock is taken only around the state change and sanity checks.
 *
 * Returns the resulting state: WNT_STOPUSING, WNT_ACQUIRE or
 * WNT_RELEASE.
 */
int
in_pcb_checkstate(struct inpcb *pcb, int mode, int locked)
{
	volatile UInt32 *wantcnt = (volatile UInt32 *)&pcb->inp_wantcnt;
	UInt32 origwant;
	UInt32 newwant;

	switch (mode) {

	case WNT_STOPUSING:
		if (locked == 0)
			socket_lock(pcb->inp_socket, 1);
		pcb->inp_state = INPCB_STATE_DEAD;

stopusing:
		if (pcb->inp_socket->so_usecount < 0)
			panic("in_pcb_checkstate STOP pcb=%p so=%p usecount is negative\n", pcb, pcb->inp_socket);
		if (locked == 0)
			socket_unlock(pcb->inp_socket, 1);

		origwant = *wantcnt;
		/* Already latched at the sentinel; nothing more to do. */
		if ((UInt16) origwant == 0xffff )
			return (WNT_STOPUSING);
		newwant = 0xffff;
		/* Latch only if no outstanding references remain. */
		if ((UInt16) origwant == 0) {
			OSCompareAndSwap(origwant, newwant, wantcnt) ;
		}
		return (WNT_STOPUSING);
		break;

	case WNT_ACQUIRE:
		/* CAS loop: increment unless latched at the sentinel. */
		do {
			origwant = *wantcnt;
			if ((UInt16) origwant == 0xffff ) {
				return (WNT_STOPUSING);
			}
			newwant = origwant + 1;
		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));
		return (WNT_ACQUIRE);
		break;

	case WNT_RELEASE:
		if (locked == 0)
			socket_lock(pcb->inp_socket, 1);

		/* CAS loop: decrement; zero or sentinel are abnormal here. */
		do {
			origwant = *wantcnt;
			if ((UInt16) origwant == 0x0 )
				panic("in_pcb_checkstate pcb=%p release with zero count", pcb);
			if ((UInt16) origwant == 0xffff ) {
#if TEMPDEBUG
				printf("in_pcb_checkstate: REL PCB was STOPUSING while release. odd pcb=%p\n", pcb);
#endif
				if (locked == 0)
					socket_unlock(pcb->inp_socket, 1);
				return (WNT_STOPUSING);
			}
			newwant = origwant - 1;
		} while (!OSCompareAndSwap(origwant, newwant, wantcnt));

		/* PCB died while we held a reference; latch the sentinel. */
		if (pcb->inp_state == INPCB_STATE_DEAD)
			goto stopusing;
		if (pcb->inp_socket->so_usecount < 0)
			panic("in_pcb_checkstate RELEASE pcb=%p so=%p usecount is negative\n", pcb, pcb->inp_socket);

		if (locked == 0)
			socket_unlock(pcb->inp_socket, 1);
		return (WNT_RELEASE);
		break;

	default:
		panic("in_pcb_checkstate: so=%p not a valid state =%x\n", pcb->inp_socket, mode);
	}

	/* NOTREACHED for valid modes; keeps the compiler satisfied. */
	return (mode);
}
/*
 * Marshal an inpcb into the legacy inpcb_compat layout used by older
 * sysctl consumers.  The destination is zeroed first, so any field
 * without a modern counterpart reads back as zero.
 */
void
inpcb_to_compat(
	struct inpcb *inp,
	struct inpcb_compat *inp_compat)
{
	bzero(inp_compat, sizeof (*inp_compat));

	/* Endpoint identification. */
	inp_compat->inp_fport = inp->inp_fport;
	inp_compat->inp_lport = inp->inp_lport;
	inp_compat->inp_dependfaddr.inp6_foreign =
	    inp->inp_dependfaddr.inp6_foreign;
	inp_compat->inp_dependladdr.inp6_local =
	    inp->inp_dependladdr.inp6_local;

	/* NAT fields: owner is obsolete and always exported as zero. */
	inp_compat->nat_owner = 0;
	inp_compat->nat_cookie = inp->nat_cookie;

	/* Generic PCB state. */
	inp_compat->inp_gencnt = inp->inp_gencnt;
	inp_compat->inp_flags = inp->inp_flags;
	inp_compat->inp_flow = inp->inp_flow;
	inp_compat->inp_vflag = inp->inp_vflag;
	inp_compat->inp_ip_ttl = inp->inp_ip_ttl;
	inp_compat->inp_ip_p = inp->inp_ip_p;

	/* Protocol-dependent state. */
	inp_compat->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
	inp_compat->inp_depend6.inp6_hlim = inp->inp_depend6.inp6_hlim;
	inp_compat->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
	inp_compat->inp_depend6.inp6_ifindex = inp->inp_depend6.inp6_ifindex;
	inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
}
#if !CONFIG_EMBEDDED
/*
 * Export an inpcb into the 64-bit xinpcb64 sysctl structure.  Note
 * that unlike inpcb_to_compat() this does not zero *xinp first; only
 * the listed fields are written.
 */
void
inpcb_to_xinpcb64(
	struct inpcb *inp,
	struct xinpcb64 *xinp)
{
	/* Endpoint identification. */
	xinp->inp_fport = inp->inp_fport;
	xinp->inp_lport = inp->inp_lport;
	xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
	xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;

	/* Generic PCB state. */
	xinp->inp_gencnt = inp->inp_gencnt;
	xinp->inp_flags = inp->inp_flags;
	xinp->inp_flow = inp->inp_flow;
	xinp->inp_vflag = inp->inp_vflag;
	xinp->inp_ip_ttl = inp->inp_ip_ttl;
	xinp->inp_ip_p = inp->inp_ip_p;

	/* Protocol-dependent state. */
	xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
	xinp->inp_depend6.inp6_hlim = inp->inp_depend6.inp6_hlim;
	xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
	xinp->inp_depend6.inp6_ifindex = inp->inp_depend6.inp6_ifindex;
	xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
}
#endif
/*
 * Hand the PCB's cached route out to the caller.  Any cached route
 * whose key is not AF_INET is released first rather than exported.
 */
void
inp_route_copyout(struct inpcb *inp, struct route *dst)
{
	struct route *cached = &inp->inp_route;

	lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);

	/* Drop a non-IPv4 cached route before copying out. */
	if (cached->ro_rt != NULL &&
	    rt_key(cached->ro_rt)->sa_family != AF_INET) {
		rtfree(cached->ro_rt);
		cached->ro_rt = NULL;
	}

	route_copyout(dst, cached, sizeof (*dst));
}
/*
 * Install a route back into the PCB's route cache, sanity-checking
 * that what comes back is an IPv4 route.
 */
void
inp_route_copyin(struct inpcb *inp, struct route *src)
{
	struct route *cached = &inp->inp_route;

	lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);

	/* A non-IPv4 route here indicates caller error or corruption. */
	if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET)
		panic("%s: wrong or corrupted route: %p", __func__, src);

	route_copyin(src, cached, sizeof (*src));
}
/*
 * Bind a PCB to the interface identified by scope index "ifscope", or
 * clear the binding when ifscope is IFSCOPE_NONE.  Returns ENXIO when
 * the index is out of range or maps to no interface, else 0.
 */
int
inp_bindif(struct inpcb *inp, unsigned int ifscope)
{
	struct ifnet *ifp = NULL;

	/* Resolve the scope under the ifnet head lock. */
	ifnet_head_lock_shared();
	if (ifscope > (unsigned)if_index || (ifscope != IFSCOPE_NONE &&
	    (ifp = ifindex2ifnet[ifscope]) == NULL)) {
		ifnet_head_done();
		return (ENXIO);
	}
	ifnet_head_done();

	VERIFY(ifp != NULL || ifscope == IFSCOPE_NONE);

	inp->inp_boundifp = ifp;
	if (inp->inp_boundifp == NULL)
		inp->inp_flags &= ~INP_BOUND_IF;
	else
		inp->inp_flags |= INP_BOUND_IF;

	/* The cached route may no longer match the new scope; drop it. */
	if (inp->inp_route.ro_rt != NULL) {
		rtfree(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = NULL;
	}
	return (0);
}
/*
 * Set the "no cellular interface" restriction on a PCB.  The flag is
 * sticky: attempting to clear it once set fails with EINVAL.
 */
int
inp_nocellular(struct inpcb *inp, unsigned int val)
{
	if (val != 0) {
		inp->inp_flags |= INP_NO_IFT_CELLULAR;
	} else if (inp->inp_flags & INP_NO_IFT_CELLULAR) {
		/* Once set, the restriction cannot be undone. */
		return (EINVAL);
	}

	/* Drop the cached route so the restriction takes effect. */
	if (inp->inp_route.ro_rt != NULL) {
		rtfree(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = NULL;
	}
	return (0);
}
/*
 * Compute a nonzero flow hash for a PCB from its addresses, ports,
 * address family, protocol and two fresh random words.  Zero is
 * reserved (it means "no flow hash"), so the global seed is reseeded
 * and the hash recomputed until a nonzero value comes out.
 */
u_int32_t
inp_calc_flowhash(struct inpcb *inp)
{
	struct inp_flowhash_key fh __attribute__((aligned(8)));
	u_int32_t hash;

	/* Lazily initialize the global hash seed. */
	if (inp_hash_seed == 0)
		inp_hash_seed = RandomULong();

	bzero(&fh, sizeof (fh));

	bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof (fh.infh_laddr));
	bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof (fh.infh_faddr));
	fh.infh_lport = inp->inp_lport;
	fh.infh_fport = inp->inp_fport;
	fh.infh_af = (inp->inp_vflag & INP_IPV6) ? AF_INET6 : AF_INET;
	fh.infh_proto = inp->inp_ip_p;
	fh.infh_rand1 = RandomULong();
	fh.infh_rand2 = RandomULong();

	/* Retry with a new seed until the hash is nonzero. */
	for (;;) {
		hash = net_flowhash(&fh, sizeof (fh), inp_hash_seed);
		if (hash != 0)
			break;
		inp_hash_seed = RandomULong();
	}

	return (hash);
}
/*
 * Three-way comparator for the flow-control red-black tree, ordering
 * entries by flow hash.
 *
 * Plain subtraction would be wrong here: the operands are u_int32_t,
 * so the difference wraps modulo 2^32 and, once converted to int, can
 * yield an ordering that is neither antisymmetric nor transitive
 * (e.g. two hashes differing by 0x80000000 compare "less than" in
 * both directions).  An RB tree built on such a comparator can
 * misplace or lose entries, so compare explicitly instead.
 */
static inline int
infc_cmp(const struct inp_fc_entry *fc1, const struct inp_fc_entry *fc2)
{
	if (fc1->infc_flowhash < fc2->infc_flowhash)
		return (-1);
	if (fc1->infc_flowhash > fc2->infc_flowhash)
		return (1);
	return (0);
}
/*
 * Register an inpcb in the flow-control tree, keyed by its flow hash.
 *
 * Returns 1 if the PCB is (or already was) the tree entry for its flow
 * hash; returns 0 on a flow hash collision with a different PCB, or if
 * the non-blocking zone allocation fails.
 */
int
inp_fc_addinp(struct inpcb *inp)
{
	struct inp_fc_entry keyfc, *infc;
	u_int32_t flowhash = inp->inp_flowhash;

	keyfc.infc_flowhash = flowhash;

	lck_mtx_lock_spin(&inp_fc_lck);
	infc = RB_FIND(inp_fc_tree, &inp_fc_tree, &keyfc);
	if (infc != NULL && infc->infc_inp == inp) {
		/* Already registered for this PCB; nothing to do. */
		lck_mtx_unlock(&inp_fc_lck);
		return (1);
	}

	if (infc != NULL) {
		/* Flow hash collision with a different PCB; give up. */
		lck_mtx_unlock(&inp_fc_lck);
		return (0);
	}

	/* Leave spin mode before calling into the zone allocator. */
	lck_mtx_convert_spin(&inp_fc_lck);
	infc = zalloc_noblock(inp_fcezone);
	if (infc == NULL) {
		/* Non-blocking allocation failed; caller copes. */
		lck_mtx_unlock(&inp_fc_lck);
		return (0);
	}
	bzero(infc, sizeof (*infc));

	infc->infc_flowhash = flowhash;
	infc->infc_inp = inp;

	RB_INSERT(inp_fc_tree, &inp_fc_tree, infc);
	lck_mtx_unlock(&inp_fc_lck);
	return (1);
}
/*
 * Look up and remove the flow-control tree entry for "flowhash".
 *
 * On success the entry is returned with a WNT_ACQUIRE reference held
 * on its inpcb; the caller is responsible for releasing that reference
 * and freeing the entry.  Returns NULL if no entry exists or if the
 * PCB is being torn down (in which case the entry is freed here).
 */
struct inp_fc_entry*
inp_fc_getinp(u_int32_t flowhash)
{
	struct inp_fc_entry keyfc, *infc;

	keyfc.infc_flowhash = flowhash;

	lck_mtx_lock_spin(&inp_fc_lck);
	infc = RB_FIND(inp_fc_tree, &inp_fc_tree, &keyfc);
	if (infc == NULL) {
		lck_mtx_unlock(&inp_fc_lck);
		return (NULL);
	}

	/* Entries are single-shot: unlink before handing out. */
	RB_REMOVE(inp_fc_tree, &inp_fc_tree, infc);

	if (in_pcb_checkstate(infc->infc_inp, WNT_ACQUIRE, 0) ==
	    WNT_STOPUSING) {
		/* The PCB is going away; discard the stale entry. */
		lck_mtx_convert_spin(&inp_fc_lck);
		inp_fc_entry_free(infc);
		infc = NULL;
	}
	lck_mtx_unlock(&inp_fc_lck);
	return (infc);
}
/*
 * Return a flow-control entry to its zone.  Callers in this file
 * unlink the entry from the flow-control tree before freeing it.
 */
void
inp_fc_entry_free(struct inp_fc_entry *infc)
{
	zfree(inp_fcezone, infc);
}
/*
 * Interface feedback path for a flow-controlled PCB: clear its flow
 * control state and, for stream sockets, unthrottle TCP.  Drops the
 * want-count reference on entry (presumably pairing with the
 * WNT_ACQUIRE taken in inp_fc_getinp() -- confirm against callers).
 */
void
inp_fc_feedback(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;

	VERIFY (so != NULL);
	socket_lock(so, 1);

	/* Release our reference; bail if the PCB is being torn down. */
	if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
		socket_unlock(so, 1);
		return;
	}

	/* Nothing to do unless the PCB is waiting on interface feedback. */
	if (!INP_WAIT_FOR_IF_FEEDBACK(inp)) {
		socket_unlock(so, 1);
		return;
	}

	inp_reset_fc_state(inp);

	if (so->so_proto->pr_type == SOCK_STREAM)
		inp_fc_unthrottle_tcp(inp);

	socket_unlock(so, 1);
}
/*
 * Clear the flow-controlled/suspended state on a PCB, generating a
 * resume event if it was suspended and waking any blocked writers.
 */
void
inp_reset_fc_state(struct inpcb *inp)
{
	struct socket *so = inp->inp_socket;
	int was_suspended = INP_IS_FLOW_SUSPENDED(inp) ? 1 : 0;
	int was_waiting = INP_WAIT_FOR_IF_FEEDBACK(inp) ? 1 : 0;

	inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);

	if (was_suspended) {
		so->so_flags &= ~(SOF_SUSPENDED);
		soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_RESUME));
	}

	/* A send still in progress may re-arm flow control; flag it. */
	if (inp->inp_sndinprog_cnt > 0)
		inp->inp_flags |= INP_FC_FEEDBACK;

	/* Let blocked writers retry now that flow control is cleared. */
	if (was_waiting)
		sowwakeup(so);
}
/*
 * Apply a flow-advisory state (controlled or suspended) to a PCB,
 * registering it in the flow-control tree.  Returns 0 when a pending
 * feedback already cleared flow control for this PCB, else 1.
 */
int
inp_set_fc_state(struct inpcb *inp, int advcode)
{
	/* Feedback beat us to it; do not re-enter flow control. */
	if (inp->inp_flags & INP_FC_FEEDBACK)
		return (0);

	/* Start from a clean slate before applying the advisory. */
	inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED);

	if (inp_fc_addinp(inp)) {
		switch (advcode) {
		case FADV_FLOW_CONTROLLED:
			inp->inp_flags |= INP_FLOW_CONTROLLED;
			break;
		case FADV_SUSPENDED:
			inp->inp_flags |= INP_FLOW_SUSPENDED;
			soevent(inp->inp_socket,
			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_SUSPEND));
			inp->inp_socket->so_flags |= SOF_SUSPENDED;
			break;
		default:
			break;
		}
	}
	return (1);
}
/*
 * Flush queued packets belonging to this PCB's flow from the cached
 * route's interface, limited to the given traffic class (or all
 * classes for SO_TC_ALL).  Returns EINVAL for a bad class, else 0.
 */
int
inp_flush(struct inpcb *inp, int optval)
{
	u_int32_t fh = inp->inp_flowhash;
	struct rtentry *rt;

	/* Accept SO_TC_ALL or any valid traffic class. */
	if (optval != SO_TC_ALL && !SO_VALID_TC(optval))
		return (EINVAL);

	/* No flow hash means nothing could be queued under this flow. */
	if (fh == 0)
		return (0);

	rt = inp->inp_route.ro_rt;
	if (rt != NULL)
		if_qflush_sc(rt->rt_ifp, so_tc2msc(optval), fh, NULL, NULL, 0);

	return (0);
}
/*
 * Clear the INP_INADDR_ANY flag on a socket's PCB, if it has one.
 * Takes and releases the socket lock around the flag update.
 */
void inp_clear_INP_INADDR_ANY(struct socket *so)
{
	struct inpcb *pcb;

	socket_lock(so, 1);
	pcb = sotoinpcb(so);
	if (pcb != NULL)
		pcb->inp_flags &= ~INP_INADDR_ANY;
	socket_unlock(so, 1);
}