#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/mcache.h>
#include <sys/protosw.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <net/if.h>
#include <net/route.h>
#include <net/ntstat.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#if INET6
#include <netinet6/ip6_var.h>
#include <netinet6/in6_var.h>
#endif
#include <net/if_dl.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <pexpert/pexpert.h>
/* Byte-wise sockaddr equality over a1's sa_len bytes */
#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
/* Exported so the kernel debugger (KDP) can learn the gateway's MAC */
extern void kdp_set_gateway_mac (void *gatewaymac);
extern struct domain routedomain;
struct route_cb route_cb;
/* Routing statistics (unreachable lookups, bad redirects, etc.) */
__private_extern__ struct rtstat rtstat = { 0, 0, 0, 0, 0 };
/* One radix tree head per address family */
struct radix_node_head *rt_tables[AF_MAX+1];
/* Global mutex protecting the radix trees and route-entry linkage */
lck_mtx_t *rnh_lock;
static lck_attr_t *rnh_lock_attr;
static lck_grp_t *rnh_lock_grp;
static lck_grp_attr_t *rnh_lock_grp_attr;
/* Lock group/attributes for the per-rtentry mutexes */
static lck_attr_t *rte_mtx_attr;
static lck_grp_t *rte_mtx_grp;
static lck_grp_attr_t *rte_mtx_grp_attr;
lck_mtx_t *route_domain_mtx;
/* Count of deleted-but-still-referenced route entries */
int rttrash = 0;
/* Debug mode bits, settable via the "rte_debug" boot-arg */
unsigned int rte_debug;
#define RTD_DEBUG 0x1
#define RTD_TRACE 0x2
#define RTD_NO_FREE 0x4
#define RTE_NAME "rtentry"
/* Zone from which all rtentry (or rtentry_dbg) structures are carved */
static struct zone *rte_zone;
#define RTE_ZONE_MAX 65536
#define RTE_ZONE_NAME RTE_NAME
/* Sentinels stamped into rtd_inuse to catch use-after-free */
#define RTD_INUSE 0xFEEDFACE
#define RTD_FREED 0xDEADBEEF
__private_extern__ unsigned int ctrace_stack_size = CTRACE_STACK_SIZE;
__private_extern__ unsigned int ctrace_hist_size = CTRACE_HIST_SIZE;
/*
 * Debug wrapper around struct rtentry: records refhold/refrele and
 * lock/unlock call sites plus alloc/free backtraces.  Used only when
 * rte_debug has RTD_DEBUG set.
 */
struct rtentry_dbg {
struct rtentry rtd_entry;
struct rtentry rtd_entry_saved;
uint32_t rtd_inuse;
uint16_t rtd_refhold_cnt;
uint16_t rtd_refrele_cnt;
uint32_t rtd_lock_cnt;
uint32_t rtd_unlock_cnt;
ctrace_t rtd_alloc;
ctrace_t rtd_free;
ctrace_t rtd_refhold[CTRACE_HIST_SIZE];
ctrace_t rtd_refrele[CTRACE_HIST_SIZE];
ctrace_t rtd_lock[CTRACE_HIST_SIZE];
ctrace_t rtd_unlock[CTRACE_HIST_SIZE];
TAILQ_ENTRY(rtentry_dbg) rtd_trash_link;
};
/* List of freed-but-referenced entries (debug mode only) */
static TAILQ_HEAD(, rtentry_dbg) rttrash_head;
static void rte_lock_init(struct rtentry *);
static void rte_lock_destroy(struct rtentry *);
static inline struct rtentry *rte_alloc_debug(void);
static inline void rte_free_debug(struct rtentry *);
static inline void rte_lock_debug(struct rtentry_dbg *);
static inline void rte_unlock_debug(struct rtentry_dbg *);
static void rt_maskedcopy(struct sockaddr *,
struct sockaddr *, struct sockaddr *);
static void rtable_init(void **);
static inline void rtref_audit(struct rtentry_dbg *);
static inline void rtunref_audit(struct rtentry_dbg *);
static struct rtentry *rtalloc1_common_locked(struct sockaddr *, int, uint32_t,
unsigned int);
static int rtrequest_common_locked(int, struct sockaddr *,
struct sockaddr *, struct sockaddr *, int, struct rtentry **,
unsigned int);
static struct rtentry *rtalloc1_locked(struct sockaddr *, int, uint32_t);
static void rtalloc_ign_common_locked(struct route *, uint32_t, unsigned int);
static inline void sin6_set_ifscope(struct sockaddr *, unsigned int);
static inline void sin6_set_embedded_ifscope(struct sockaddr *, unsigned int);
static inline unsigned int sin6_get_embedded_ifscope(struct sockaddr *);
static struct sockaddr *sa_copy(struct sockaddr *, struct sockaddr_storage *,
unsigned int *);
static struct sockaddr *ma_copy(int, struct sockaddr *,
struct sockaddr_storage *, unsigned int);
static struct sockaddr *sa_trim(struct sockaddr *, int);
static struct radix_node *node_lookup(struct sockaddr *, struct sockaddr *,
unsigned int);
static struct radix_node *node_lookup_default(int);
static int rn_match_ifscope(struct radix_node *, void *);
static struct ifaddr *ifa_ifwithroute_common_locked(int,
const struct sockaddr *, const struct sockaddr *, unsigned int);
static struct rtentry *rte_alloc(void);
static void rte_free(struct rtentry *);
static void rtfree_common(struct rtentry *, boolean_t);
static void rte_if_ref(struct ifnet *, int);
/* Bumped whenever the table changes so cached routes can be revalidated */
uint32_t route_generation = 0;
/*
 * AF_INET sockaddr overlaid so that the interface scope can be stored
 * in the (otherwise zero) sin_zero padding area.
 */
struct sockaddr_inifscope {
__uint8_t sin_len;
sa_family_t sin_family;
in_port_t sin_port;
struct in_addr sin_addr;
union {
char sin_zero[8];
struct {
__uint32_t ifscope;
} _in_index;
} un;
#define sin_scope_id un._in_index.ifscope
};
/* Convenience casts (via size_t to silence alignment warnings) */
#define SA(sa) ((struct sockaddr *)(size_t)(sa))
#define SIN(sa) ((struct sockaddr_in *)(size_t)(sa))
#define SIN6(sa) ((struct sockaddr_in6 *)(size_t)(sa))
#define SINIFSCOPE(sa) ((struct sockaddr_inifscope *)(size_t)(sa))
#define SIN6IFSCOPE(sa) SIN6(sa)
/*
 * Sanity-check that a sockaddr claimed to carry an IPv4 interface
 * scope really is an AF_INET address of sufficient length.
 */
#define ASSERT_SINIFSCOPE(sa) { \
	if ((sa)->sa_family != AF_INET || \
	    (sa)->sa_len < sizeof (struct sockaddr_in)) \
		panic("%s: bad sockaddr_in %p\n", __func__, sa); \
}
/*
 * Same check for AF_INET6.  Fix: the panic message used to read
 * "bad sockaddr_in", misidentifying the failing type as IPv4.
 */
#define ASSERT_SIN6IFSCOPE(sa) { \
	if ((sa)->sa_family != AF_INET6 || \
	    (sa)->sa_len < sizeof (struct sockaddr_in6)) \
		panic("%s: bad sockaddr_in6 %p\n", __func__, sa); \
}
/* Argument to rn_match_ifscope() during scoped tree walks */
struct matchleaf_arg {
unsigned int ifscope;
};
/* Template sockaddrs for the IPv4/IPv6 default (all-zeroes) keys */
static struct sockaddr sin_def = {
sizeof (struct sockaddr_in), AF_INET, { 0, }
};
static struct sockaddr_in6 sin6_def = {
sizeof (struct sockaddr_in6), AF_INET6, 0, 0, IN6ADDR_ANY_INIT, 0
};
/* Interface index of the primary (unscoped default) route, per AF */
static unsigned int primary_ifscope = IFSCOPE_NONE;
static unsigned int primary6_ifscope = IFSCOPE_NONE;
/* TRUE if sa is the IPv4/IPv6 "any" destination */
#define INET_DEFAULT(sa) \
((sa)->sa_family == AF_INET && SIN(sa)->sin_addr.s_addr == 0)
#define INET6_DEFAULT(sa) \
((sa)->sa_family == AF_INET6 && \
IN6_IS_ADDR_UNSPECIFIED(&SIN6(sa)->sin6_addr))
#define SA_DEFAULT(sa) (INET_DEFAULT(sa) || INET6_DEFAULT(sa))
#define RT(r) ((struct rtentry *)r)
#define RN(r) ((struct radix_node *)r)
#define RT_HOST(r) (RT(r)->rt_flags & RTF_HOST)
SYSCTL_DECL(_net_idle_route);
/* Expiration (seconds) applied to routes holding an idle if ref */
static int rt_if_idle_expire_timeout = RT_IF_IDLE_EXPIRE_TIMEOUT;
SYSCTL_INT(_net_idle_route, OID_AUTO, expire_timeout, CTLFLAG_RW,
&rt_if_idle_expire_timeout, 0, "Default expiration time on routes for "
"interface idle reference counting");
boolean_t
rt_primary_default(struct rtentry *rt, struct sockaddr *dst)
{
return (SA_DEFAULT(dst) && !(rt->rt_flags & RTF_IFSCOPE));
}
/*
 * Record the interface scope of the primary (unscoped) default route
 * for the given address family.
 */
void
set_primary_ifscope(int af, unsigned int ifscope)
{
	unsigned int *scope_p;

	scope_p = (af == AF_INET) ? &primary_ifscope : &primary6_ifscope;
	*scope_p = ifscope;
}
/*
 * Return the recorded scope of the primary default route for af
 * (AF_INET selects the IPv4 value; anything else the IPv6 value).
 */
unsigned int
get_primary_ifscope(int af)
{
	if (af == AF_INET)
		return (primary_ifscope);
	return (primary6_ifscope);
}
/* Store an interface scope in an AF_INET sockaddr's sin_zero area */
void
sin_set_ifscope(struct sockaddr *sa, unsigned int ifscope)
{
ASSERT_SINIFSCOPE(sa);
SINIFSCOPE(sa)->sin_scope_id = ifscope;
}
/* Store an interface scope in an AF_INET6 sockaddr's sin6_scope_id */
static inline void
sin6_set_ifscope(struct sockaddr *sa, unsigned int ifscope)
{
ASSERT_SIN6IFSCOPE(sa);
SIN6IFSCOPE(sa)->sin6_scope_id = ifscope;
}
/* Fetch the interface scope stored in an AF_INET sockaddr */
unsigned int
sin_get_ifscope(struct sockaddr *sa)
{
ASSERT_SINIFSCOPE(sa);
return (SINIFSCOPE(sa)->sin_scope_id);
}
/* Fetch the interface scope stored in an AF_INET6 sockaddr */
unsigned int
sin6_get_ifscope(struct sockaddr *sa)
{
ASSERT_SIN6IFSCOPE(sa);
return (SIN6IFSCOPE(sa)->sin6_scope_id);
}
/*
 * Write the scope into the address bytes themselves (KAME-style
 * embedding in s6_addr16[1]); only valid for scope-embeddable
 * (link/interface-local) IPv6 addresses.
 */
static inline void
sin6_set_embedded_ifscope(struct sockaddr *sa, unsigned int ifscope)
{
ASSERT_SIN6IFSCOPE(sa);
VERIFY(IN6_IS_SCOPE_EMBED(&(SIN6(sa)->sin6_addr)));
SIN6(sa)->sin6_addr.s6_addr16[1] = htons(ifscope);
}
/* Read the KAME-style scope embedded in the IPv6 address bytes */
static inline unsigned int
sin6_get_embedded_ifscope(struct sockaddr *sa)
{
ASSERT_SIN6IFSCOPE(sa);
return (ntohs(SIN6(sa)->sin6_addr.s6_addr16[1]));
}
/*
 * Copy an AF_INET/AF_INET6 sockaddr into caller-supplied storage and
 * normalize its interface scope.  pifscope, when non-NULL, is both an
 * input (desired scope) and an output (scope actually recorded).  For
 * scope-embeddable IPv6 addresses the scope already embedded in the
 * address bytes wins when no explicit scope was supplied, and the
 * embedded bytes are rewritten to agree with the final scope.
 */
static struct sockaddr *
sa_copy(struct sockaddr *src, struct sockaddr_storage *dst,
unsigned int *pifscope)
{
int af = src->sa_family;
unsigned int ifscope = (pifscope != NULL) ? *pifscope : IFSCOPE_NONE;
VERIFY(af == AF_INET || af == AF_INET6);
bzero(dst, sizeof (*dst));
if (af == AF_INET) {
bcopy(src, dst, sizeof (struct sockaddr_in));
/* record the scope unless the caller wanted an unscoped copy */
if (pifscope == NULL || ifscope != IFSCOPE_NONE)
sin_set_ifscope(SA(dst), ifscope);
} else {
bcopy(src, dst, sizeof (struct sockaddr_in6));
if (pifscope != NULL &&
IN6_IS_SCOPE_EMBED(&SIN6(dst)->sin6_addr)) {
unsigned int eifscope;
/* scope carried inside the address bytes */
eifscope = sin6_get_embedded_ifscope(SA(dst));
if (eifscope != IFSCOPE_NONE && ifscope == IFSCOPE_NONE)
ifscope = eifscope;
sin6_set_ifscope(SA(dst), ifscope);
/* keep the embedded scope consistent with sin6_scope_id */
if (ifscope != IFSCOPE_NONE && eifscope != ifscope)
sin6_set_embedded_ifscope(SA(dst), ifscope);
} else if (pifscope == NULL || ifscope != IFSCOPE_NONE) {
sin6_set_ifscope(SA(dst), ifscope);
}
}
/* hand back the scope that ended up in the copy */
if (pifscope != NULL) {
*pifscope = (af == AF_INET) ? sin_get_ifscope(SA(dst)) :
sin6_get_ifscope(SA(dst));
}
return (SA(dst));
}
/*
 * Copy a netmask into caller-supplied storage, masked against itself,
 * then embed ifscope and truncate sa_len so the mask covers exactly up
 * to and including the scope field — making the scope participate in
 * radix-tree matching.
 */
static struct sockaddr *
ma_copy(int af, struct sockaddr *src, struct sockaddr_storage *dst,
unsigned int ifscope)
{
VERIFY(af == AF_INET || af == AF_INET6);
bzero(dst, sizeof (*dst));
rt_maskedcopy(src, SA(dst), src);
if (af == AF_INET) {
SINIFSCOPE(dst)->sin_scope_id = ifscope;
SINIFSCOPE(dst)->sin_len =
offsetof(struct sockaddr_inifscope, sin_scope_id) +
sizeof (SINIFSCOPE(dst)->sin_scope_id);
} else {
SIN6IFSCOPE(dst)->sin6_scope_id = ifscope;
SIN6IFSCOPE(dst)->sin6_len =
offsetof(struct sockaddr_in6, sin6_scope_id) +
sizeof (SIN6IFSCOPE(dst)->sin6_scope_id);
}
return (SA(dst));
}
/*
 * Shrink sa_len by dropping trailing zero bytes beyond the first
 * `skip` bytes (used to scrub scope bytes out of netmasks before
 * reporting them to user space).  A mask reduced to nothing but the
 * skipped header gets sa_len 0, mirroring the radix code's habit of
 * storing zero-length masks.
 */
static struct sockaddr *
sa_trim(struct sockaddr *sa, int skip)
{
caddr_t cp, base = (caddr_t)sa + skip;
if (sa->sa_len <= skip)
return (sa);
/* walk backwards over trailing zero bytes */
for (cp = base + (sa->sa_len - skip); cp > base && cp[-1] == 0;)
cp--;
sa->sa_len = (cp - base) + skip;
if (sa->sa_len < skip) {
/* cannot happen: loop never moves cp below base */
panic("%s: broken logic (sa_len %d < skip %d )", __func__,
sa->sa_len, skip);
} else if (sa->sa_len == skip) {
sa->sa_len = 0;
}
return (sa);
}
/*
 * Scrub embedded interface-scope information from a sockaddr before it
 * is exposed to user space via routing messages.  For RTAX_DST a fresh
 * unscoped copy is made; for RTAX_NETMASK the scope field is cleared
 * and trailing zeroes trimmed (except for RTM_GET/RTM_GET2 on IPv6,
 * where the full sockaddr_in6 length is restored).  Returns either sa
 * unchanged or SA(ss) holding the scrubbed copy.
 */
struct sockaddr *
rtm_scrub_ifscope(int type, int idx, struct sockaddr *hint, struct sockaddr *sa,
struct sockaddr_storage *ss)
{
struct sockaddr *ret = sa;
switch (idx) {
case RTAX_DST:
if (sa->sa_family == AF_INET &&
SINIFSCOPE(sa)->sin_scope_id != IFSCOPE_NONE) {
ret = sa_copy(sa, ss, NULL);
} else if (sa->sa_family == AF_INET6 &&
SIN6IFSCOPE(sa)->sin6_scope_id != IFSCOPE_NONE) {
ret = sa_copy(sa, ss, NULL);
}
break;
case RTAX_NETMASK: {
int skip, af;
/* the mask's family comes from the hint (usually the dst) */
if (hint == NULL ||
((af = hint->sa_family) != AF_INET && af != AF_INET6))
break;
skip = (af == AF_INET) ?
offsetof(struct sockaddr_in, sin_addr) :
offsetof(struct sockaddr_in6, sin6_addr);
if (sa->sa_len > skip && sa->sa_len <= sizeof (*ss)) {
bzero(ss, sizeof (*ss));
bcopy(sa, ss, sa->sa_len);
if (hint->sa_family == AF_INET)
SINIFSCOPE(ss)->sin_scope_id = IFSCOPE_NONE;
else
SIN6IFSCOPE(ss)->sin6_scope_id = IFSCOPE_NONE;
ret = sa_trim(SA(ss), skip);
/* RTM_GET consumers expect a full-length v6 mask */
if (hint->sa_family == AF_INET6 &&
type != RTM_GET && type != RTM_GET2)
SA(ret)->sa_len = sizeof (struct sockaddr_in6);
}
break;
}
default:
break;
}
return (ret);
}
/*
 * Radix-tree leaf predicate: return nonzero when the route is
 * interface-scoped and its key's scope matches the one in arg
 * (a struct matchleaf_arg).
 */
static int
rn_match_ifscope(struct radix_node *rn, void *arg)
{
struct rtentry *rt = (struct rtentry *)rn;
struct matchleaf_arg *ma = arg;
int af = rt_key(rt)->sa_family;
if (!(rt->rt_flags & RTF_IFSCOPE) || (af != AF_INET && af != AF_INET6))
return (0);
return (af == AF_INET ?
(SINIFSCOPE(rt_key(rt))->sin_scope_id == ma->ifscope) :
(SIN6IFSCOPE(rt_key(rt))->sin6_scope_id == ma->ifscope));
}
/*
 * Ask every registered protocol domain to attach its routing table
 * (radix tree) into the per-family table array.
 */
static void
rtable_init(void **table)
{
struct domain *dom;
for (dom = domains; dom; dom = dom->dom_next)
if (dom->dom_rtattach)
dom->dom_rtattach(&table[dom->dom_family],
dom->dom_rtoffset);
}
/*
 * One-time initialization of the routing subsystem: locks, the radix
 * code, per-domain tables, and the rtentry zone.  The zone element
 * size depends on whether rte_debug tracing was enabled via boot-arg.
 */
void
route_init(void)
{
int size;
PE_parse_boot_argn("rte_debug", &rte_debug, sizeof (rte_debug));
/* any nonzero boot-arg implies at least basic debugging */
if (rte_debug != 0)
rte_debug |= RTD_DEBUG;
rnh_lock_grp_attr = lck_grp_attr_alloc_init();
rnh_lock_grp = lck_grp_alloc_init("route", rnh_lock_grp_attr);
rnh_lock_attr = lck_attr_alloc_init();
if ((rnh_lock = lck_mtx_alloc_init(rnh_lock_grp,
rnh_lock_attr)) == NULL) {
printf("route_init: can't alloc rnh_lock\n");
return;
}
rte_mtx_grp_attr = lck_grp_attr_alloc_init();
rte_mtx_grp = lck_grp_alloc_init(RTE_NAME, rte_mtx_grp_attr);
rte_mtx_attr = lck_attr_alloc_init();
/* rn_init() expects the tree lock to be held */
lck_mtx_lock(rnh_lock);
rn_init();
lck_mtx_unlock(rnh_lock);
rtable_init((void **)rt_tables);
route_domain_mtx = routedomain.dom_mtx;
/* debug entries are larger: they carry audit history */
if (rte_debug & RTD_DEBUG)
size = sizeof (struct rtentry_dbg);
else
size = sizeof (struct rtentry);
rte_zone = zinit(size, RTE_ZONE_MAX * size, 0, RTE_ZONE_NAME);
if (rte_zone == NULL)
panic("route_init: failed allocating rte_zone");
zone_change(rte_zone, Z_EXPAND, TRUE);
zone_change(rte_zone, Z_CALLERACCT, FALSE);
zone_change(rte_zone, Z_NOENCRYPT, TRUE);
TAILQ_INIT(&rttrash_head);
}
/* Atomically bump the generation counter, invalidating cached routes */
void
routegenid_update(void)
{
(void) atomic_add_32_ov(&route_generation, 1);
}
/* Resolve ro->ro_dst into ro->ro_rt, ignoring no flags */
void
rtalloc(struct route *ro)
{
rtalloc_ign(ro, 0);
}
/* Scoped variant of rtalloc(): constrain lookup to ifscope */
void
rtalloc_scoped(struct route *ro, unsigned int ifscope)
{
rtalloc_scoped_ign(ro, 0, ifscope);
}
/*
 * Core of rtalloc_ign()/rtalloc_scoped_ign(); rnh_lock held.  Reuses a
 * cached ro_rt if it is still up, attached to an interface, and from
 * the current generation; otherwise frees it and performs a fresh
 * lookup, stamping the result with the current generation.
 */
static void
rtalloc_ign_common_locked(struct route *ro, uint32_t ignore,
unsigned int ifscope)
{
struct rtentry *rt;
if ((rt = ro->ro_rt) != NULL) {
RT_LOCK_SPIN(rt);
if (rt->rt_ifp != NULL && (rt->rt_flags & RTF_UP) &&
rt->generation_id == route_generation) {
/* cached route still valid — keep it */
RT_UNLOCK(rt);
return;
}
RT_UNLOCK(rt);
rtfree_locked(rt);
ro->ro_rt = NULL;
}
ro->ro_rt = rtalloc1_common_locked(&ro->ro_dst, 1, ignore, ifscope);
if (ro->ro_rt != NULL) {
ro->ro_rt->generation_id = route_generation;
RT_LOCK_ASSERT_NOTHELD(ro->ro_rt);
}
}
/* Unlocked entry point: take rnh_lock around the common lookup */
void
rtalloc_ign(struct route *ro, uint32_t ignore)
{
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
rtalloc_ign_common_locked(ro, ignore, IFSCOPE_NONE);
lck_mtx_unlock(rnh_lock);
}
/* Unlocked scoped entry point: take rnh_lock around the lookup */
void
rtalloc_scoped_ign(struct route *ro, uint32_t ignore, unsigned int ifscope)
{
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
rtalloc_ign_common_locked(ro, ignore, ifscope);
lck_mtx_unlock(rnh_lock);
}
/* Unscoped lookup; caller holds rnh_lock */
static struct rtentry *
rtalloc1_locked(struct sockaddr *dst, int report, uint32_t ignflags)
{
return (rtalloc1_common_locked(dst, report, ignflags, IFSCOPE_NONE));
}
/* Scoped lookup; caller holds rnh_lock */
struct rtentry *
rtalloc1_scoped_locked(struct sockaddr *dst, int report, uint32_t ignflags,
unsigned int ifscope)
{
return (rtalloc1_common_locked(dst, report, ignflags, ifscope));
}
/*
 * Look up dst in its family's radix tree (rnh_lock held).  If the hit
 * is a cloning route whose cloning flags are not masked off by
 * ignflags, clone it via RTM_RESOLVE.  On miss (or XRESOLVE) and when
 * report is set, broadcast an RTM_MISS/RTM_RESOLVE routing message.
 * Returns a referenced rtentry, or NULL.
 */
static struct rtentry *
rtalloc1_common_locked(struct sockaddr *dst, int report, uint32_t ignflags,
unsigned int ifscope)
{
struct radix_node_head *rnh = rt_tables[dst->sa_family];
struct rtentry *rt, *newrt = NULL;
struct rt_addrinfo info;
uint32_t nflags;
int err = 0, msgtype = RTM_MISS;
if (rnh == NULL)
goto unreachable;
rt = rt_lookup(FALSE, dst, NULL, rnh, ifscope);
if (rt == NULL)
goto unreachable;
RT_LOCK_SPIN(rt);
newrt = rt;
nflags = rt->rt_flags & ~ignflags;
RT_UNLOCK(rt);
if (report && (nflags & (RTF_CLONING | RTF_PRCLONING))) {
/* clone the route for this specific destination */
err = rtrequest_locked(RTM_RESOLVE, dst, NULL, NULL, 0, &newrt);
if (err) {
/* cloning failed: fall back to the cloning route itself */
newrt = rt;
goto miss;
}
rtfree_locked(rt);
if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
/* external resolver must finish the job */
msgtype = RTM_RESOLVE;
goto miss;
}
}
goto done;
unreachable:
rtstat.rts_unreach++;
miss:
if (report) {
bzero((caddr_t)&info, sizeof(info));
info.rti_info[RTAX_DST] = dst;
rt_missmsg(msgtype, &info, 0, err);
}
done:
return (newrt);
}
/*
 * Public unscoped lookup: acquire rnh_lock, do the locked lookup, and
 * return the (referenced) entry or NULL.
 */
struct rtentry *
rtalloc1(struct sockaddr *dst, int report, uint32_t ignflags)
{
	struct rtentry *rt;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(rnh_lock);
	rt = rtalloc1_locked(dst, report, ignflags);
	lck_mtx_unlock(rnh_lock);

	return (rt);
}
/*
 * Public scoped lookup: acquire rnh_lock, do the locked scoped lookup,
 * and return the (referenced) entry or NULL.
 */
struct rtentry *
rtalloc1_scoped(struct sockaddr *dst, int report, uint32_t ignflags,
    unsigned int ifscope)
{
	struct rtentry *rt;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(rnh_lock);
	rt = rtalloc1_scoped_locked(dst, report, ignflags, ifscope);
	lck_mtx_unlock(rnh_lock);

	return (rt);
}
/* Release a reference; caller already holds rnh_lock */
void
rtfree_locked(struct rtentry *rt)
{
rtfree_common(rt, TRUE);
}
/*
 * Drop one reference on rt and, if it was the last and the route is no
 * longer in the tree (RTF_UP clear), tear the entry down.  `locked`
 * says whether the caller already holds rnh_lock; if not, the refcount
 * must be re-checked after acquiring it, since another thread may have
 * gained a reference in the window (lock-order: rnh_lock before the
 * rtentry lock).
 */
static void
rtfree_common(struct rtentry *rt, boolean_t locked)
{
struct radix_node_head *rnh;
RT_LOCK_SPIN(rt);
if (rtunref(rt) > 0) {
/* others still hold references; nothing more to do */
RT_UNLOCK(rt);
return;
}
if (!locked) {
/* re-acquire in correct lock order, then re-verify refcnt */
RT_ADDREF_LOCKED(rt);
RT_UNLOCK(rt);
lck_mtx_lock(rnh_lock);
RT_LOCK_SPIN(rt);
RT_REMREF_LOCKED(rt);
if (rt->rt_refcnt > 0) {
RT_UNLOCK(rt);
goto done;
}
}
RT_CONVERT_LOCK(rt);
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
if (rt->rt_refcnt != 0)
panic("rt %p invalid refcnt %d", rt, rt->rt_refcnt);
/* give the protocol a chance to clean up (e.g. ARP/NDP state) */
rnh = rt_tables[rt_key(rt)->sa_family];
if (rnh != NULL && rnh->rnh_close != NULL)
rnh->rnh_close((struct radix_node *)rt, rnh);
if (!(rt->rt_flags & RTF_UP)) {
struct rtentry *rt_parent;
struct ifaddr *rt_ifa;
if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
panic("rt %p freed while in radix tree\n", rt);
/* no longer counted among deleted-but-referenced entries */
(void) OSDecrementAtomic(&rttrash);
if (rte_debug & RTD_DEBUG) {
TAILQ_REMOVE(&rttrash_head, (struct rtentry_dbg *)rt,
rtd_trash_link);
}
/* detach linkage before dropping the entry lock */
if ((rt_parent = rt->rt_parent) != NULL)
rt->rt_parent = NULL;
if ((rt_ifa = rt->rt_ifa) != NULL)
rt->rt_ifa = NULL;
if (rt->rt_llinfo != NULL) {
if (rt->rt_llinfo_free != NULL)
(*rt->rt_llinfo_free)(rt->rt_llinfo);
else
R_Free(rt->rt_llinfo);
rt->rt_llinfo = NULL;
}
RT_UNLOCK(rt);
/* release what we detached, then the entry itself */
if (rt_parent != NULL)
rtfree_locked(rt_parent);
if (rt_ifa != NULL)
IFA_REMREF(rt_ifa);
R_Free(rt_key(rt));
nstat_route_detach(rt);
rte_lock_destroy(rt);
rte_free(rt);
} else {
RT_UNLOCK(rt);
}
done:
if (!locked)
lck_mtx_unlock(rnh_lock);
}
/* Release a reference; acquires rnh_lock itself if teardown is needed */
void
rtfree(struct rtentry *rt)
{
rtfree_common(rt, FALSE);
}
/*
 * Decrement rt's reference count (entry lock held) and return the new
 * count.  Does NOT free the entry — callers use rtfree*() for that.
 */
int
rtunref(struct rtentry *p)
{
RT_LOCK_ASSERT_HELD(p);
if (p->rt_refcnt == 0)
panic("%s(%p) bad refcnt\n", __func__, p);
--p->rt_refcnt;
if (rte_debug & RTD_DEBUG)
rtunref_audit((struct rtentry_dbg *)p);
return (p->rt_refcnt);
}
/* Debug bookkeeping for a refcount drop: detect UAF, log call site */
static inline void
rtunref_audit(struct rtentry_dbg *rte)
{
uint16_t idx;
if (rte->rtd_inuse != RTD_INUSE)
panic("rtunref: on freed rte=%p\n", rte);
/* ring-buffer index into the refrele history */
idx = atomic_add_16_ov(&rte->rtd_refrele_cnt, 1) % CTRACE_HIST_SIZE;
if (rte_debug & RTD_TRACE)
ctrace_record(&rte->rtd_refrele[idx]);
}
/* Increment rt's reference count (entry lock held) */
void
rtref(struct rtentry *p)
{
RT_LOCK_ASSERT_HELD(p);
if (++p->rt_refcnt == 0)
panic("%s(%p) bad refcnt\n", __func__, p);
if (rte_debug & RTD_DEBUG)
rtref_audit((struct rtentry_dbg *)p);
}
/* Debug bookkeeping for a refcount hold: detect UAF, log call site */
static inline void
rtref_audit(struct rtentry_dbg *rte)
{
uint16_t idx;
if (rte->rtd_inuse != RTD_INUSE)
panic("rtref_audit: on freed rte=%p\n", rte);
/* ring-buffer index into the refhold history */
idx = atomic_add_16_ov(&rte->rtd_refhold_cnt, 1) % CTRACE_HIST_SIZE;
if (rte_debug & RTD_TRACE)
ctrace_record(&rte->rtd_refhold[idx]);
}
/*
 * Replace rt->rt_ifa with ifa, moving the ifaddr references
 * accordingly.  Both rnh_lock and the entry lock must be held; the
 * entry lock is converted to full mutex before touching refcounts.
 */
void
rtsetifa(struct rtentry *rt, struct ifaddr* ifa)
{
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK_ASSERT_HELD(rt);
if (rt->rt_ifa == ifa)
return;
RT_CONVERT_LOCK(rt);
if (rt->rt_ifa)
IFA_REMREF(rt->rt_ifa);
rt->rt_ifa = ifa;
if (rt->rt_ifa)
IFA_ADDREF(rt->rt_ifa);
}
/*
 * Handle an ICMP redirect: if legitimate (src is the current gateway
 * for dst, new gateway is on a directly-connected net and is not one
 * of our own addresses), either create a new dynamic host route or
 * rewrite the gateway of the existing route.  Always reports the
 * outcome via an RTM_REDIRECT routing message and updates rtstat.
 */
void
rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway,
struct sockaddr *netmask, int flags, struct sockaddr *src,
struct rtentry **rtp)
{
struct rtentry *rt = NULL;
int error = 0;
short *stat = 0;
struct rt_addrinfo info;
struct ifaddr *ifa = NULL;
unsigned int ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
struct sockaddr_storage ss;
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
/* embed the receiving interface's scope into src when scoping is on */
#if INET6
if ((src->sa_family == AF_INET && ip_doscopedroute) ||
(src->sa_family == AF_INET6 && ip6_doscopedroute))
#else
if (src->sa_family == AF_INET && ip_doscopedroute)
#endif
src = sa_copy(src, &ss, &ifscope);
/* new gateway must be directly reachable */
if ((ifa = ifa_ifwithnet_scoped(gateway, ifscope)) == NULL) {
error = ENETUNREACH;
goto out;
}
rt = rtalloc1_scoped_locked(dst, 0, RTF_CLONING|RTF_PRCLONING, ifscope);
if (rt != NULL)
RT_LOCK(rt);
/*
 * Only accept the redirect if it came from the router we are
 * currently using for dst (unless RTF_DONE says it was verified).
 */
if (!(flags & RTF_DONE) && rt != NULL &&
(!equal(src, rt->rt_gateway) || !equal(rt->rt_ifa->ifa_addr,
ifa->ifa_addr))) {
error = EINVAL;
} else {
IFA_REMREF(ifa);
/* refuse redirects that point at one of our own addresses */
if ((ifa = ifa_ifwithaddr(gateway))) {
IFA_REMREF(ifa);
ifa = NULL;
error = EHOSTUNREACH;
}
}
if (ifa) {
IFA_REMREF(ifa);
ifa = NULL;
}
if (error) {
if (rt != NULL)
RT_UNLOCK(rt);
goto done;
}
/* no usable route (or degenerate mask): create a fresh host route */
if ((rt == NULL) || (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2))
goto create;
RT_LOCK_ASSERT_HELD(rt);
if (rt->rt_flags & RTF_GATEWAY) {
if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
create:
if (rt != NULL)
RT_UNLOCK(rt);
flags |= RTF_GATEWAY | RTF_DYNAMIC;
error = rtrequest_scoped_locked(RTM_ADD, dst,
gateway, netmask, flags, NULL, ifscope);
stat = &rtstat.rts_dynamic;
} else {
/* existing host route: just swing its gateway */
rt->rt_flags |= RTF_MODIFIED;
flags |= RTF_MODIFIED;
stat = &rtstat.rts_newgateway;
error = rt_setgate(rt, rt_key(rt), gateway);
RT_UNLOCK(rt);
}
} else {
RT_UNLOCK(rt);
error = EHOSTUNREACH;
}
done:
if (rt != NULL) {
RT_LOCK_ASSERT_NOTHELD(rt);
if (rtp && !error)
*rtp = rt;
else
rtfree_locked(rt);
}
out:
if (error) {
rtstat.rts_badredirect++;
} else {
if (stat != NULL)
(*stat)++;
if (use_routegenid)
routegenid_update();
}
lck_mtx_unlock(rnh_lock);
/* tell interested listeners what happened */
bzero((caddr_t)&info, sizeof(info));
info.rti_info[RTAX_DST] = dst;
info.rti_info[RTAX_GATEWAY] = gateway;
info.rti_info[RTAX_NETMASK] = netmask;
info.rti_info[RTAX_AUTHOR] = src;
rt_missmsg(RTM_REDIRECT, &info, flags, error);
}
/*
 * Routing ioctl entry point.  Only multicast-routing ioctls are
 * supported (when INET+MROUTING are compiled in); everything else
 * returns ENXIO.
 */
int
rtioctl(unsigned long req, caddr_t data, struct proc *p)
{
#pragma unused(p)
#if INET && MROUTING
return mrt_ioctl(req, data);
#else
#pragma unused(req)
#pragma unused(data)
return ENXIO;
#endif
}
/*
 * Find the ifaddr to associate with a route toward dst via gateway,
 * taking and releasing rnh_lock around the locked variant.  Returns a
 * referenced ifaddr or NULL.
 */
struct ifaddr *
ifa_ifwithroute(
	int flags,
	const struct sockaddr *dst,
	const struct sockaddr *gateway)
{
	struct ifaddr *result;

	lck_mtx_lock(rnh_lock);
	result = ifa_ifwithroute_locked(flags, dst, gateway);
	lck_mtx_unlock(rnh_lock);

	return (result);
}
/* Unscoped variant: strip RTF_IFSCOPE and search without a scope */
struct ifaddr *
ifa_ifwithroute_locked(int flags, const struct sockaddr *dst,
const struct sockaddr *gateway)
{
return (ifa_ifwithroute_common_locked((flags & ~RTF_IFSCOPE), dst,
gateway, IFSCOPE_NONE));
}
/*
 * Scoped variant: make the RTF_IFSCOPE flag agree with whether a real
 * scope was supplied, then fall into the common search.
 */
struct ifaddr *
ifa_ifwithroute_scoped_locked(int flags, const struct sockaddr *dst,
    const struct sockaddr *gateway, unsigned int ifscope)
{
	flags = (ifscope != IFSCOPE_NONE) ?
	    (flags | RTF_IFSCOPE) : (flags & ~RTF_IFSCOPE);

	return (ifa_ifwithroute_common_locked(flags, dst, gateway, ifscope));
}
/*
 * Core ifaddr selection for a new route (rnh_lock held).  Tries, in
 * order: the interface owning dst (host routes), the interface owning
 * the gateway address, the interface on the gateway's subnet, and
 * finally the route table itself.  A candidate whose family differs
 * from dst's is swapped for a same-family address on the same
 * interface.  When the route is interface-scoped, a candidate on the
 * wrong interface is rejected.  Returns a referenced ifaddr or NULL.
 */
static struct ifaddr *
ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst,
const struct sockaddr *gw, unsigned int ifscope)
{
struct ifaddr *ifa = NULL;
struct rtentry *rt = NULL;
struct sockaddr_storage dst_ss, gw_ss;
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
/* work on unscoped copies so embedded scopes don't skew matching */
#if INET6
if (dst != NULL &&
((dst->sa_family == AF_INET && ip_doscopedroute) ||
(dst->sa_family == AF_INET6 && ip6_doscopedroute)))
#else
if (dst != NULL && dst->sa_family == AF_INET && ip_doscopedroute)
#endif
dst = sa_copy(SA(dst), &dst_ss, NULL);
#if INET6
if (gw != NULL &&
((gw->sa_family == AF_INET && ip_doscopedroute) ||
(gw->sa_family == AF_INET6 && ip6_doscopedroute)))
#else
if (gw != NULL && gw->sa_family == AF_INET && ip_doscopedroute)
#endif
gw = sa_copy(SA(gw), &gw_ss, NULL);
if (!(flags & RTF_GATEWAY)) {
/*
 * Direct route: gw is really the interface address, so look
 * for it among our own addresses.
 */
if (flags & RTF_HOST) {
ifa = ifa_ifwithdstaddr(dst);
}
if (ifa == NULL)
ifa = ifa_ifwithaddr_scoped(gw, ifscope);
} else {
/* p2p link whose remote end is the gateway? */
ifa = ifa_ifwithdstaddr(gw);
}
if (ifa == NULL)
ifa = ifa_ifwithnet_scoped(gw, ifscope);
if (ifa == NULL) {
/* last resort: route toward dst and borrow its ifaddr */
rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)dst,
0, 0, ifscope);
if (rt != NULL) {
RT_LOCK_SPIN(rt);
ifa = rt->rt_ifa;
if (ifa != NULL) {
RT_CONVERT_LOCK(rt);
IFA_ADDREF(ifa);
}
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
rt = NULL;
}
}
/* prefer an address of dst's family on the same interface */
if (ifa != NULL && ifa->ifa_addr->sa_family != dst->sa_family) {
struct ifaddr *newifa;
newifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
if (newifa != NULL) {
IFA_REMREF(ifa);
ifa = newifa;
}
}
/* if still unsatisfied, route toward the gateway itself */
if ((ifa == NULL ||
!equal(ifa->ifa_addr, (struct sockaddr *)(size_t)gw)) &&
(rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)gw,
0, 0, ifscope)) != NULL) {
if (ifa != NULL)
IFA_REMREF(ifa);
RT_LOCK_SPIN(rt);
ifa = rt->rt_ifa;
if (ifa != NULL) {
RT_CONVERT_LOCK(rt);
IFA_ADDREF(ifa);
}
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
}
/* scoped route must resolve to the requested interface */
if ((flags & RTF_IFSCOPE) &&
ifa != NULL && ifa->ifa_ifp->if_index != ifscope) {
IFA_REMREF(ifa);
ifa = NULL;
}
return (ifa);
}
static int rt_fixdelete(struct radix_node *, void *);
static int rt_fixchange(struct radix_node *, void *);
/* Context handed to rt_fixchange() by rnh_walktree_from() */
struct rtfc_arg {
struct rtentry *rt0;
struct radix_node_head *rnh;
};
/* Unscoped request; rnh_lock held by the caller */
int
rtrequest_locked(int req, struct sockaddr *dst, struct sockaddr *gateway,
struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
return (rtrequest_common_locked(req, dst, gateway, netmask,
(flags & ~RTF_IFSCOPE), ret_nrt, IFSCOPE_NONE));
}
/*
 * Scoped request (rnh_lock held): make RTF_IFSCOPE agree with whether
 * a real scope was supplied, then run the common request path.
 */
int
rtrequest_scoped_locked(int req, struct sockaddr *dst,
    struct sockaddr *gateway, struct sockaddr *netmask, int flags,
    struct rtentry **ret_nrt, unsigned int ifscope)
{
	flags = (ifscope != IFSCOPE_NONE) ?
	    (flags | RTF_IFSCOPE) : (flags & ~RTF_IFSCOPE);

	return (rtrequest_common_locked(req, dst, gateway, netmask,
	    flags, ret_nrt, ifscope));
}
/*
 * The workhorse of the routing table: RTM_ADD / RTM_DELETE /
 * RTM_RESOLVE against the radix tree for dst's family (rnh_lock held).
 * For scoped routing the destination and netmask are first copied and
 * rewritten to embed ifscope so that scope participates in tree
 * matching.  On success with ret_nrt non-NULL, *ret_nrt holds a
 * referenced entry the caller must release.
 */
static int
rtrequest_common_locked(int req, struct sockaddr *dst0,
struct sockaddr *gateway, struct sockaddr *netmask, int flags,
struct rtentry **ret_nrt, unsigned int ifscope)
{
int error = 0;
struct rtentry *rt;
struct radix_node *rn;
struct radix_node_head *rnh;
struct ifaddr *ifa = NULL;
struct sockaddr *ndst, *dst = dst0;
struct sockaddr_storage ss, mask;
struct timeval curr_calendartime;
int af = dst->sa_family;
void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *);
#define senderr(x) { error = x ; goto bad; }
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
if ((rnh = rt_tables[af]) == NULL)
senderr(ESRCH);
/* host routes never carry a netmask */
if (flags & RTF_HOST)
netmask = NULL;
/*
 * For non-RESOLVE requests under scoped routing, rewrite dst (and
 * netmask) to embed the scope; RESOLVE keys are derived later from
 * the parent route instead.
 */
#if INET6
if (req != RTM_RESOLVE &&
((af == AF_INET && ip_doscopedroute) ||
(af == AF_INET6 && ip6_doscopedroute))) {
#else
if (req != RTM_RESOLVE && af == AF_INET && ip_doscopedroute) {
#endif
dst = sa_copy(dst, &ss, &ifscope);
if (netmask != NULL)
netmask = ma_copy(af, netmask, &mask, ifscope);
if (ifscope != IFSCOPE_NONE)
flags |= RTF_IFSCOPE;
} else {
if ((flags & RTF_IFSCOPE) && (af != AF_INET && af != AF_INET6))
senderr(EINVAL);
#if INET6
if ((af == AF_INET && !ip_doscopedroute) ||
(af == AF_INET6 && !ip6_doscopedroute))
#else
if (af == AF_INET && !ip_doscopedroute)
#endif
ifscope = IFSCOPE_NONE;
}
if (ifscope == IFSCOPE_NONE)
flags &= ~RTF_IFSCOPE;
switch (req) {
case RTM_DELETE: {
struct rtentry *gwrt = NULL;
/* detach the node from the tree first */
if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
senderr(ESRCH);
if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
panic ("rtrequest delete");
rt = (struct rtentry *)rn;
RT_LOCK(rt);
RT_ADDREF_LOCKED(rt);
rt->rt_flags &= ~RTF_UP;
rt->rt_flags |= RTF_CONDEMNED;
/* a deleted cloning route takes its clones down with it */
if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
rt_mask(rt)) {
RT_UNLOCK(rt);
rnh->rnh_walktree_from(rnh, dst, rt_mask(rt),
rt_fixdelete, rt);
RT_LOCK(rt);
}
if ((gwrt = rt->rt_gwroute) != NULL)
rt->rt_gwroute = NULL;
/* let the ifaddr's hook clean up (e.g. ARP state) */
if ((ifa = rt->rt_ifa) != NULL) {
IFA_LOCK_SPIN(ifa);
ifa_rtrequest = ifa->ifa_rtrequest;
IFA_UNLOCK(ifa);
if (ifa_rtrequest != NULL)
ifa_rtrequest(RTM_DELETE, rt, NULL);
ifa = NULL;
}
(void) OSIncrementAtomic(&rttrash);
if (rte_debug & RTD_DEBUG) {
TAILQ_INSERT_TAIL(&rttrash_head,
(struct rtentry_dbg *)rt, rtd_trash_link);
}
/* deleting the primary default route clears its scope record */
if (rt_primary_default(rt, rt_key(rt))) {
set_primary_ifscope(rt_key(rt)->sa_family,
IFSCOPE_NONE);
}
rt_clear_idleref(rt);
RT_UNLOCK(rt);
if (gwrt != NULL)
rtfree_locked(gwrt);
/* caller either takes the entry or we drop the last ref */
if (ret_nrt != NULL) {
*ret_nrt = rt;
} else {
rtfree_locked(rt);
}
break;
}
case RTM_RESOLVE:
/* clone *ret_nrt (the cloning parent) for this dst */
if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
senderr(EINVAL);
ifa = rt->rt_ifa;
IFA_ADDREF(ifa);
flags = rt->rt_flags &
~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
flags |= RTF_WASCLONED;
gateway = rt->rt_gateway;
if ((netmask = rt->rt_genmask) == NULL)
flags |= RTF_HOST;
#if INET6
if ((af != AF_INET && af != AF_INET6) ||
(af == AF_INET && !ip_doscopedroute) ||
(af == AF_INET6 && !ip6_doscopedroute))
#else
if (af != AF_INET || !ip_doscopedroute)
#endif
goto makeroute;
/* derive the clone's scope from the parent route */
if (af == AF_INET &&
IN_LINKLOCAL(ntohl(SIN(dst)->sin_addr.s_addr))) {
ifscope = IFSCOPE_NONE;
flags &= ~RTF_IFSCOPE;
} else {
if (flags & RTF_IFSCOPE) {
ifscope = (af == AF_INET) ?
sin_get_ifscope(rt_key(rt)) :
sin6_get_ifscope(rt_key(rt));
} else {
ifscope = rt->rt_ifp->if_index;
flags |= RTF_IFSCOPE;
}
VERIFY(ifscope != IFSCOPE_NONE);
}
dst = sa_copy(dst, &ss, (ifscope == IFSCOPE_NONE) ?
NULL : &ifscope);
if (netmask != NULL)
netmask = ma_copy(af, netmask, &mask, ifscope);
goto makeroute;
case RTM_ADD:
if ((flags & RTF_GATEWAY) && !gateway)
panic("rtrequest: RTF_GATEWAY but no gateway");
/* pick the ifaddr the new route will hang off of */
if (flags & RTF_IFSCOPE) {
ifa = ifa_ifwithroute_scoped_locked(flags, dst0,
gateway, ifscope);
} else {
ifa = ifa_ifwithroute_locked(flags, dst0, gateway);
}
if (ifa == NULL)
senderr(ENETUNREACH);
makeroute:
getmicrotime(&curr_calendartime);
if ((rt = rte_alloc()) == NULL)
senderr(ENOBUFS);
Bzero(rt, sizeof(*rt));
rte_lock_init(rt);
rt->base_calendartime = curr_calendartime.tv_sec;
rt->base_uptime = net_uptime();
RT_LOCK(rt);
rt->rt_flags = RTF_UP | flags;
/* rt_setgate allocates the key+gateway storage */
if ((error = rt_setgate(rt, dst, gateway)) != 0) {
RT_UNLOCK(rt);
nstat_route_detach(rt);
rte_lock_destroy(rt);
rte_free(rt);
senderr(error);
}
ndst = rt_key(rt);
if (netmask)
rt_maskedcopy(dst, ndst, netmask);
else
Bcopy(dst, ndst, dst->sa_len);
rtsetifa(rt, ifa);
rt->rt_ifp = rt->rt_ifa->ifa_ifp;
/* insert into the radix tree */
rn = rnh->rnh_addaddr((caddr_t)ndst, (caddr_t)netmask,
rnh, rt->rt_nodes);
if (rn == 0) {
struct rtentry *rt2;
/*
 * Insert failed, likely because a cloned route with
 * the same key exists — delete it and retry once.
 */
if (flags & RTF_IFSCOPE) {
rt2 = rtalloc1_scoped_locked(dst0, 0,
RTF_CLONING | RTF_PRCLONING, ifscope);
} else {
rt2 = rtalloc1_locked(dst, 0,
RTF_CLONING | RTF_PRCLONING);
}
if (rt2 && rt2->rt_parent) {
(void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
rt2->rt_gateway, rt_mask(rt2),
rt2->rt_flags, 0);
rtfree_locked(rt2);
rn = rnh->rnh_addaddr((caddr_t)ndst,
(caddr_t)netmask,
rnh, rt->rt_nodes);
} else if (rt2) {
rtfree_locked(rt2);
}
}
if (rn == NULL) {
/* still no luck — undo everything and bail */
if (rt->rt_gwroute) {
rtfree_locked(rt->rt_gwroute);
rt->rt_gwroute = NULL;
}
if (rt->rt_ifa) {
IFA_REMREF(rt->rt_ifa);
rt->rt_ifa = NULL;
}
R_Free(rt_key(rt));
RT_UNLOCK(rt);
nstat_route_detach(rt);
rte_lock_destroy(rt);
rte_free(rt);
senderr(EEXIST);
}
rt->rt_parent = NULL;
if (req == RTM_RESOLVE) {
/* inherit metrics/expiry and link to the cloning parent */
RT_LOCK_SPIN(*ret_nrt);
VERIFY((*ret_nrt)->rt_expire == 0 || (*ret_nrt)->rt_rmx.rmx_expire != 0);
VERIFY((*ret_nrt)->rt_expire != 0 || (*ret_nrt)->rt_rmx.rmx_expire == 0);
rt->rt_rmx = (*ret_nrt)->rt_rmx;
rt_setexpire(rt, (*ret_nrt)->rt_expire);
if ((*ret_nrt)->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
rt->rt_parent = (*ret_nrt);
RT_ADDREF_LOCKED(*ret_nrt);
}
RT_UNLOCK(*ret_nrt);
if (rt->rt_parent != NULL &&
!(rt->rt_flags & (RTF_BROADCAST | RTF_MULTICAST))) {
rt_set_idleref(rt);
}
}
/* give the ifaddr's hook a chance (e.g. set up llinfo) */
IFA_LOCK_SPIN(ifa);
ifa_rtrequest = ifa->ifa_rtrequest;
IFA_UNLOCK(ifa);
if (ifa_rtrequest != NULL)
ifa_rtrequest(req, rt, SA(ret_nrt ? *ret_nrt : NULL));
IFA_REMREF(ifa);
ifa = NULL;
/* adding an unscoped default route records the primary scope */
if (rt_primary_default(rt, rt_key(rt))) {
set_primary_ifscope(rt_key(rt)->sa_family,
rt->rt_ifp->if_index);
}
if (ret_nrt) {
*ret_nrt = rt;
RT_ADDREF_LOCKED(rt);
}
/* a new network route may shadow stale clones — fix them up */
if (req == RTM_ADD &&
!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
struct rtfc_arg arg;
arg.rnh = rnh;
arg.rt0 = rt;
RT_UNLOCK(rt);
rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
rt_fixchange, &arg);
} else {
RT_UNLOCK(rt);
}
nstat_route_new_entry(rt);
break;
}
bad:
if (ifa)
IFA_REMREF(ifa);
return (error);
}
/*
 * Public unscoped table-modification entry point: take rnh_lock,
 * perform the request, release the lock, and return the error code.
 */
int
rtrequest(int req, struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
	int err;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(rnh_lock);
	err = rtrequest_locked(req, dst, gateway, netmask, flags, ret_nrt);
	lck_mtx_unlock(rnh_lock);

	return (err);
}
/* Public scoped entry point: wrap the locked variant in rnh_lock */
int
rtrequest_scoped(int req, struct sockaddr *dst, struct sockaddr *gateway,
struct sockaddr *netmask, int flags, struct rtentry **ret_nrt,
unsigned int ifscope)
{
int error;
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
lck_mtx_lock(rnh_lock);
error = rtrequest_scoped_locked(req, dst, gateway, netmask, flags,
ret_nrt, ifscope);
lck_mtx_unlock(rnh_lock);
return (error);
}
/*
 * Tree-walk callback used when a cloning route is deleted: remove any
 * non-pinned, non-cloning child whose rt_parent is the dying route
 * (vp).  rnh_lock held.
 */
static int
rt_fixdelete(struct radix_node *rn, void *vp)
{
struct rtentry *rt = (struct rtentry *)rn;
struct rtentry *rt0 = vp;
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK(rt);
if (rt->rt_parent == rt0 &&
!(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
RT_UNLOCK(rt);
return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
rt_mask(rt), rt->rt_flags, NULL));
}
RT_UNLOCK(rt);
return 0;
}
/*
 * Tree-walk callback used after a new network route is added: delete
 * cloned routes that would now be covered by the new route (vp is a
 * struct rtfc_arg carrying the new route and tree head), so they get
 * re-cloned from the better parent on next use.  rnh_lock held.
 */
static int
rt_fixchange(struct radix_node *rn, void *vp)
{
struct rtentry *rt = (struct rtentry *)rn;
struct rtfc_arg *ap = vp;
struct rtentry *rt0 = ap->rt0;
struct radix_node_head *rnh = ap->rnh;
u_char *xk1, *xm1, *xk2, *xmp;
int i, len;
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
RT_LOCK(rt);
/* only cloned, unpinned leaves are candidates */
if (!rt->rt_parent ||
(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
RT_UNLOCK(rt);
return (0);
}
if (rt->rt_parent == rt0)
goto delete_rt;
len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
xk1 = (u_char *)rt_key(rt0);
xm1 = (u_char *)rt_mask(rt0);
xk2 = (u_char *)rt_key(rt);
/* the new route's mask must be at least as wide as the parent's */
if ((xmp = (u_char *)rt_mask(rt->rt_parent)) != NULL) {
int mlen = rt_mask(rt->rt_parent)->sa_len;
if (mlen > rt_mask(rt0)->sa_len) {
RT_UNLOCK(rt);
return (0);
}
for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
RT_UNLOCK(rt);
return (0);
}
}
}
/* does the new route's key/mask cover this clone's key? */
for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
if ((xk2[i] & xm1[i]) != xk1[i]) {
RT_UNLOCK(rt);
return (0);
}
}
delete_rt:
RT_UNLOCK(rt);
return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
rt_mask(rt), rt->rt_flags, NULL));
}
/* Round a sockaddr length up to the next multiple of 32 bytes. */
#define SA_SIZE(x) (-(-((uintptr_t)(x)) & -(32)))

/*
 * Set the gateway of a route (rt->rt_gateway) and, for RTF_GATEWAY
 * routes, resolve and cache the route to the gateway itself in
 * rt->rt_gwroute.  The destination key and the gateway are stored back
 * to back in a single allocated chunk (each slot rounded with SA_SIZE).
 *
 * Called with rnh_lock and rt's own lock held.  rt's lock is dropped and
 * reacquired internally, so an extra reference is taken on rt for the
 * duration of the call.  Returns 0 on success, or:
 *   EBUSY          rt is (or becomes) condemned
 *   EADDRNOTAVAIL  host route whose gateway equals its own destination
 *   EADDRINUSE     the gateway lookup resolved to rt itself (loop)
 *   EHOSTUNREACH/ENETUNREACH  scoped gateway resolves on wrong interface
 *   ENOBUFS        allocation of the new key/gateway chunk failed
 */
int
rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate)
{
	int dlen = SA_SIZE(dst->sa_len), glen = SA_SIZE(gate->sa_len);
	struct radix_node_head *rnh = rt_tables[dst->sa_family];

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	/* A condemned entry is on its way out; refuse to modify it. */
	if (rt->rt_flags & RTF_CONDEMNED)
		return (EBUSY);

	/* Extra ref so rt survives the lock drops below. */
	RT_ADDREF_LOCKED(rt);

	/*
	 * A host gateway route whose destination equals its gateway is
	 * disallowed; if one already exists in the tree, delete it.
	 */
	if (((rt->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) ==
	    (RTF_HOST|RTF_GATEWAY)) && (dst->sa_len == gate->sa_len) &&
	    (bcmp(dst, gate, dst->sa_len) == 0)) {
		if (rt_key(rt) != NULL) {
			/*
			 * NOTE(review): rt fields are used after RT_UNLOCK;
			 * presumably rnh_lock keeps them stable here.
			 */
			RT_UNLOCK(rt);
			(void) rtrequest_locked(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
			RT_LOCK(rt);
		}
		/* Release extra ref */
		RT_REMREF_LOCKED(rt);
		return (EADDRNOTAVAIL);
	}

	/* For indirect routes, (re)resolve the route to the gateway. */
	if (rt->rt_flags & RTF_GATEWAY) {
		struct rtentry *gwrt;
		unsigned int ifscope;

		/* Pull any embedded interface scope out of the destination. */
		if (dst->sa_family == AF_INET)
			ifscope = sin_get_ifscope(dst);
		else if (dst->sa_family == AF_INET6)
			ifscope = sin6_get_ifscope(dst);
		else
			ifscope = IFSCOPE_NONE;

		RT_UNLOCK(rt);
		/* Look up the gateway, scoped to ifscope when one is set. */
		gwrt = rtalloc1_scoped_locked(gate, 1,
		    RTF_CLONING | RTF_PRCLONING, ifscope);
		if (gwrt != NULL)
			RT_LOCK_ASSERT_NOTHELD(gwrt);
		RT_LOCK(rt);

		/* Loop avoidance: a route may not be its own gateway route. */
		if (gwrt == rt) {
			RT_REMREF_LOCKED(gwrt);
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return (EADDRINUSE);
		}

		/*
		 * A scoped route's gateway must resolve over the same
		 * interface as the scope; otherwise it is unreachable.
		 */
		if (ifscope != IFSCOPE_NONE && (rt->rt_flags & RTF_IFSCOPE) &&
		    gwrt != NULL && gwrt->rt_ifp != NULL &&
		    gwrt->rt_ifp->if_index != ifscope) {
			rtfree_locked(gwrt);
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return ((rt->rt_flags & RTF_HOST) ?
			    EHOSTUNREACH : ENETUNREACH);
		}

		/* rt may have been condemned while its lock was dropped. */
		if (rt->rt_flags & RTF_CONDEMNED) {
			if (gwrt != NULL)
				rtfree_locked(gwrt);
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return (EBUSY);
		}

		/* Swap in the new cached gateway route. */
		if (rt->rt_gwroute != NULL)
			rtfree_locked(rt->rt_gwroute);
		rt->rt_gwroute = gwrt;

		/*
		 * If this qualifies as the primary default route (per
		 * rt_primary_default(), defined elsewhere in this file),
		 * record its interface as the family's primary scope.
		 */
		if (rt_primary_default(rt, dst) && rt->rt_ifp != NULL) {
			set_primary_ifscope(dst->sa_family,
			    rt->rt_ifp->if_index);
		}

		/*
		 * Tell the kernel debugger about the new IPv4 default
		 * gateway MAC when the gateway is a link-layer address on
		 * the primary interface (or no primary interface exists).
		 */
		if ((dst->sa_family == AF_INET) &&
		    gwrt != NULL && gwrt->rt_gateway->sa_family == AF_LINK &&
		    (gwrt->rt_ifp->if_index == get_primary_ifscope(AF_INET) ||
		    get_primary_ifscope(AF_INET) == IFSCOPE_NONE))
			kdp_set_gateway_mac(SDL(gwrt->rt_gateway)->sdl_data);
	}

	/*
	 * dst and gateway live in one chunk: [dst | gateway].  Reuse the
	 * existing chunk when the new gateway fits in the old slot;
	 * otherwise allocate a fresh chunk and re-point key and gateway.
	 */
	if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway->sa_len)) {
		caddr_t new;

		R_Malloc(new, caddr_t, dlen + glen);
		if (new == NULL) {
			/* Undo the gwroute caching done above. */
			if (rt->rt_gwroute != NULL)
				rtfree_locked(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
			/* Release extra ref */
			RT_REMREF_LOCKED(rt);
			return (ENOBUFS);
		}

		/*
		 * Copy from 'dst' (not rt_key(rt)): for a freshly created
		 * entry rt_key(rt) may still be NULL.
		 */
		bzero(new, dlen + glen);
		Bcopy(dst, new, dst->sa_len);
		R_Free(rt_key(rt));	/* free the old chunk, if any */
		rt->rt_nodes->rn_key = new;
		rt->rt_gateway = (struct sockaddr *)(new + dlen);
	}

	/* Store the new gateway value. */
	Bcopy(gate, rt->rt_gateway, gate->sa_len);

	/*
	 * Keep the stored gateway's embedded scope in sync with the
	 * cached gateway route when the latter is interface-scoped.
	 */
	if ((rt->rt_flags & RTF_GATEWAY) && rt->rt_gwroute != NULL &&
	    (rt->rt_gwroute->rt_flags & RTF_IFSCOPE)) {
		if (rt->rt_gateway->sa_family == AF_INET &&
		    rt_key(rt->rt_gwroute)->sa_family == AF_INET) {
			sin_set_ifscope(rt->rt_gateway,
			    sin_get_ifscope(rt_key(rt->rt_gwroute)));
		} else if (rt->rt_gateway->sa_family == AF_INET6 &&
		    rt_key(rt->rt_gwroute)->sa_family == AF_INET6) {
			sin6_set_ifscope(rt->rt_gateway,
			    sin6_get_ifscope(rt_key(rt->rt_gwroute)));
		}
	}

	/*
	 * For network routes with a valid mask, walk the subtree and fix
	 * up clones now covered by this route (see rt_fixchange).  Host
	 * routes have no clones to fix, and a mask may not exist yet
	 * during an add.
	 */
	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
		struct rtfc_arg arg;
		arg.rnh = rnh;
		arg.rt0 = rt;
		RT_UNLOCK(rt);
		rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
		    rt_fixchange, &arg);
		RT_LOCK(rt);
	}

	/* Release extra ref */
	RT_REMREF_LOCKED(rt);
	return (0);
}

#undef SA_SIZE
/*
 * Copy src into dst, applying netmask byte-wise.  The first two bytes
 * (the length and family fields) are copied verbatim.  Masked copying
 * stops at netmask's length (first byte of netmask) or src's length
 * (first byte of src), whichever comes first; any remaining bytes up to
 * src's length are zeroed so the result compares clean.
 */
static void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
    struct sockaddr *netmask)
{
	u_char *sp = (u_char *)src;
	u_char *dp = (u_char *)dst;
	u_char *mp = (u_char *)netmask;
	u_char *dend = dp + *mp;	/* stop of masked region */
	u_char *dmax = dp + *sp;	/* total bytes to produce */

	/* Length and family are never masked. */
	*dp++ = *sp++;
	*dp++ = *sp++;
	mp += 2;

	if (dend > dmax)
		dend = dmax;
	while (dp < dend)
		*dp++ = *sp++ & *mp++;

	/* Zero the tail beyond the mask, up to src's length. */
	if (dp < dmax)
		bzero((caddr_t)dp, (unsigned)(dmax - dp));
}
static struct radix_node *
node_lookup(struct sockaddr *dst, struct sockaddr *netmask,
unsigned int ifscope)
{
struct radix_node_head *rnh;
struct radix_node *rn;
struct sockaddr_storage ss, mask;
int af = dst->sa_family;
struct matchleaf_arg ma = { ifscope };
rn_matchf_t *f = rn_match_ifscope;
void *w = &ma;
if (af != AF_INET && af != AF_INET6)
return (NULL);
rnh = rt_tables[af];
dst = sa_copy(dst, &ss, (ifscope == IFSCOPE_NONE) ? NULL : &ifscope);
if (netmask != NULL)
netmask = ma_copy(af, netmask, &mask, ifscope);
if (ifscope == IFSCOPE_NONE)
f = w = NULL;
rn = rnh->rnh_lookup_args(dst, netmask, rnh, f, w);
if (rn != NULL && (rn->rn_flags & RNF_ROOT))
rn = NULL;
return (rn);
}
/*
 * Look up the default route entry (all-zeroes key) in the AF_INET or
 * AF_INET6 tree.  Any other family is a caller bug.
 */
static struct radix_node *
node_lookup_default(int af)
{
	struct radix_node_head *rnh;

	VERIFY(af == AF_INET || af == AF_INET6);
	rnh = rt_tables[af];

	if (af == AF_INET)
		return (rnh->rnh_lookup(&sin_def, NULL, rnh));
	return (rnh->rnh_lookup(&sin6_def, NULL, rnh));
}
/*
 * Look up a route for dst (and optionally netmask) in the given radix
 * tree, honoring scoped routing when it is enabled for the family.
 *
 * The scoped strategy, in order:
 *   1. Unscoped lookup (result remembered in rn0 as a fallback).
 *   2. If the caller gave no scope, derive one from the primary
 *      interface, or from the route found in step 1.
 *   3. Retry scoped; prefer the unscoped result if it is more specific.
 *   4. Last resort: the default route, if it matches the scope.
 *
 * Returns a referenced rtentry or NULL.  Called with rnh_lock held.
 */
struct rtentry *
rt_lookup(boolean_t lookup_only, struct sockaddr *dst, struct sockaddr *netmask,
    struct radix_node_head *rnh, unsigned int ifscope)
{
	struct radix_node *rn0, *rn;
	boolean_t dontcare;
	int af = dst->sa_family;
	struct sockaddr_storage dst_ss, mask_ss;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* The netmask participates only in lookup-only (exact) searches. */
	if (!lookup_only)
		netmask = NULL;

	/*
	 * Unscoped case: unsupported family, or scoped routing disabled
	 * for this family — plain longest-prefix match.
	 */
#if INET6
	if ((af != AF_INET && af != AF_INET6) ||
	    (af == AF_INET && !ip_doscopedroute) ||
	    (af == AF_INET6 && !ip6_doscopedroute)) {
#else
	if (af != AF_INET || !ip_doscopedroute) {
#endif
		rn = rnh->rnh_matchaddr(dst, rnh);
		/* The radix root is a sentinel, never a real route. */
		if (rn != NULL && (rn->rn_flags & RNF_ROOT))
			rn = NULL;
		/* Take a reference unless the entry is condemned. */
		if (rn != NULL) {
			RT_LOCK_SPIN(RT(rn));
			if (!(RT(rn)->rt_flags & RTF_CONDEMNED)) {
				RT_ADDREF_LOCKED(RT(rn));
				RT_UNLOCK(RT(rn));
			} else {
				RT_UNLOCK(RT(rn));
				rn = NULL;
			}
		}
		return (RT(rn));
	}

	/*
	 * Scoped case.  Copy dst/netmask into local storage; sa_copy()
	 * may update ifscope through the pointer it is given.
	 */
	dst = sa_copy(dst, &dst_ss, &ifscope);
	if (netmask != NULL)
		netmask = ma_copy(af, netmask, &mask_ss, ifscope);
	dontcare = (ifscope == IFSCOPE_NONE);

	/* Pass 1: unscoped search. */
	rn0 = rn = node_lookup(dst, netmask, IFSCOPE_NONE);

	/* No scope given: tentatively use the family's primary interface. */
	if (dontcare)
		ifscope = get_primary_ifscope(af);

	/*
	 * Examine the unscoped result.  Loopback routes are always
	 * acceptable; otherwise reject a route on the wrong interface
	 * (possibly also dropping the rn0 fallback), or retry scoped when
	 * the route is on the right interface but itself unscoped.
	 */
	if (rn != NULL) {
		struct rtentry *rt = RT(rn);
		if (rt->rt_ifp != lo_ifp) {
			if (rt->rt_ifp->if_index != ifscope) {
				rn = NULL;
				if (dontcare)
					/* Adopt this route's scope. */
					ifscope = rt->rt_ifp->if_index;
				else
					/* Wrong interface: no fallback. */
					rn0 = NULL;
			} else if (!(rt->rt_flags & RTF_IFSCOPE)) {
				rn = NULL;
			}
		}
	}

	/* Pass 2: scoped search. */
	if (rn == NULL)
		rn = node_lookup(dst, netmask, ifscope);

	/*
	 * Prefer the unscoped candidate when the scoped one is a default
	 * route while the unscoped one is not, or when the scoped one is
	 * a network route while the unscoped one is a host route.
	 */
	if (rn == NULL || (rn0 != NULL &&
	    ((SA_DEFAULT(rt_key(RT(rn))) && !SA_DEFAULT(rt_key(RT(rn0)))) ||
	    (!RT_HOST(rn) && RT_HOST(rn0)))))
		rn = rn0;

	/*
	 * Last resort: the default route, but only if it resolves over
	 * the interface matching the scope.
	 */
	if (rn == NULL && (rn = node_lookup_default(af)) != NULL &&
	    RT(rn)->rt_ifp->if_index != ifscope)
		rn = NULL;

	/* Validate the winner and take a reference on it. */
	if (rn != NULL) {
		RT_LOCK_SPIN(RT(rn));
		if (rt_validate(RT(rn))) {
			RT_ADDREF_LOCKED(RT(rn));
			RT_UNLOCK(RT(rn));
		} else {
			RT_UNLOCK(RT(rn));
			rn = NULL;
		}
	}
	return (RT(rn));
}
/*
 * Check whether a locked route entry may be handed out: condemned
 * entries are never valid.  For IPv4/IPv6 entries, give the per-family
 * code (in_validate/in6_validate) a chance to run before returning.
 * Returns TRUE (nonzero) if the entry is usable.  Caller holds rt's lock.
 */
boolean_t
rt_validate(struct rtentry *rt)
{
	RT_LOCK_ASSERT_HELD(rt);

	/* Condemned entries are never returned as valid. */
	if (rt->rt_flags & RTF_CONDEMNED)
		return (0);

	switch (rt_key(rt)->sa_family) {
	case AF_INET:
		(void) in_validate(RN(rt));
		break;
	case AF_INET6:
		(void) in6_validate(RN(rt));
		break;
	default:
		break;
	}
	return (1);
}
/*
 * Add or delete the routing table entry for an interface address.  Thin
 * wrapper that takes rnh_lock around rtinit_locked(); the caller must
 * not already hold rnh_lock.  Returns 0 or an errno value.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	int result;

	/* Re-acquiring rnh_lock would deadlock; assert it is not held. */
	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);

	lck_mtx_lock(rnh_lock);
	result = rtinit_locked(ifa, cmd, flags);
	lck_mtx_unlock(rnh_lock);

	return (result);
}
/*
 * Set up or tear down the routing table entry for an interface address,
 * typically when an address is configured or removed.  cmd is RTM_ADD
 * or RTM_DELETE; flags are RTF_* values (RTF_HOST selects ifa_dstaddr
 * over ifa_addr as the destination).  Called with rnh_lock held.
 * Returns 0 or an errno value.
 */
int
rtinit_locked(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst;
	struct sockaddr *deldst;
	struct mbuf *m = 0;
	struct rtentry *nrt = 0;
	u_int32_t ifa_flags;
	int error;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	/*
	 * For a delete, verify that any existing entry really belongs to
	 * this ifa, so we don't scrub a route owned by another address.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * For a network route, mask off the host bits of the
		 * destination (when a netmask is available); the masked
		 * copy lives in a temporary mbuf.
		 */
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			m = m_get(M_DONTWAIT, MT_SONAME);
			if (m == NULL) {
				return(ENOBUFS);
			}
			deldst = mtod(m, struct sockaddr *);
			rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
			dst = deldst;
		}
		/* Find the existing entry, if any, and check its ifa. */
		rt = rtalloc1_locked(dst, 0, 0);
		if (rt) {
			RT_LOCK_SPIN(rt);
			if (rt->rt_ifa != ifa) {
				/* Belongs to another address: bail out. */
				RT_REMREF_LOCKED(rt);
				RT_UNLOCK(rt);
				if (m)
					(void) m_free(m);
				return (flags & RTF_HOST ? EHOSTUNREACH
				    : ENETUNREACH);
			} else {
				RT_REMREF_LOCKED(rt);
				RT_UNLOCK(rt);
			}
		}
#if 0
		/* Disabled: deleting a nonexistent route used to fail here. */
		else {
			lck_mtx_unlock(rnh_lock);
			return (flags & RTF_HOST ? EHOSTUNREACH
			    : ENETUNREACH);
		}
#endif
	}
	/* Perform the actual request, merging in the ifa's flags. */
	IFA_LOCK_SPIN(ifa);
	ifa_flags = ifa->ifa_flags;
	IFA_UNLOCK(ifa);
	error = rtrequest_locked(cmd, dst, ifa->ifa_addr, ifa->ifa_netmask,
	    flags | ifa_flags, &nrt);
	if (m)
		(void) m_free(m);
	/*
	 * Successful delete: notify listeners, bump the route generation
	 * id, and drop the reference returned by rtrequest_locked().
	 */
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		RT_LOCK(rt);
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (use_routegenid)
			routegenid_update();
		RT_UNLOCK(rt);
		rtfree_locked(rt);
	}
	/*
	 * Successful add: sanity-check the entry we got back.
	 */
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		RT_LOCK(rt);
		/*
		 * If the new route came back pointing at a different ifa,
		 * re-point it at ours, fixing up protocol state, llinfo
		 * and interface reference counts along the way.
		 */
		if (rt->rt_ifa != ifa) {
			void (*ifa_rtrequest)
			    (int, struct rtentry *, struct sockaddr *);
			if (!(rt->rt_ifa->ifa_ifp->if_flags &
			    (IFF_POINTOPOINT|IFF_LOOPBACK)))
				printf("rtinit: wrong ifa (%p) was (%p)\n",
				    ifa, rt->rt_ifa);
			/* Let the old ifa's protocol undo per-route state. */
			IFA_LOCK_SPIN(rt->rt_ifa);
			ifa_rtrequest = rt->rt_ifa->ifa_rtrequest;
			IFA_UNLOCK(rt->rt_ifa);
			if (ifa_rtrequest != NULL)
				ifa_rtrequest(RTM_DELETE, rt, SA(0));
			/* Swap in the new ifa. */
			rtsetifa(rt, ifa);
			if (rt->rt_ifp != ifa->ifa_ifp) {
				/* Interface changes: purge link-layer info */
				if (rt->rt_llinfo_purge != NULL)
					rt->rt_llinfo_purge(rt);
				/* ...and migrate the interface refcount. */
				if (rt->rt_if_ref_fn != NULL) {
					rt->rt_if_ref_fn(ifa->ifa_ifp, 1);
					rt->rt_if_ref_fn(rt->rt_ifp, -1);
				}
			}
			rt->rt_ifp = ifa->ifa_ifp;
			/* Inherit the new interface's MTU. */
			rt->rt_rmx.rmx_mtu = ifa->ifa_ifp->if_mtu;
			/* Let the new ifa's protocol set up its state. */
			IFA_LOCK_SPIN(ifa);
			ifa_rtrequest = ifa->ifa_rtrequest;
			IFA_UNLOCK(ifa);
			if (ifa_rtrequest != NULL)
				ifa_rtrequest(RTM_ADD, rt, SA(0));
		}
		/* Notify listeners and drop rtrequest's extra reference. */
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (use_routegenid)
			routegenid_update();
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
	}
	return (error);
}
/*
 * Compute an expiration time for a route as base + delta, capping delta
 * at rt_if_idle_expire_timeout when the route's interface has asked for
 * aggressive draining (so idle routes on such interfaces expire sooner).
 */
u_int64_t
rt_expiry(struct rtentry *rt, u_int64_t base, u_int32_t delta)
{
	u_int32_t span = delta;

	if (rt->rt_ifp != NULL && rt->rt_ifp->if_want_aggressive_drain != 0)
		span = MIN(rt_if_idle_expire_timeout, delta);

	return (base + span);
}
/*
 * Mark a route as holding an idle reference on its interface: drop any
 * existing idle reference first (so it is never double-counted), bump
 * the interface's route refcount via rte_if_ref, and flag the entry
 * with RTF_IFREF.  Caller holds rt's lock.
 */
void
rt_set_idleref(struct rtentry *rt)
{
	RT_LOCK_ASSERT_HELD(rt);

	rt_clear_idleref(rt);

	rte_if_ref(rt->rt_ifp, 1);
	rt->rt_if_ref_fn = rte_if_ref;
	rt->rt_flags |= RTF_IFREF;
}
/*
 * Release a route's idle reference on its interface, if it holds one:
 * drop the interface route refcount, clear RTF_IFREF, and forget the
 * reference function.  No-op when no idle reference is held.  Caller
 * holds rt's lock.
 */
void
rt_clear_idleref(struct rtentry *rt)
{
	RT_LOCK_ASSERT_HELD(rt);

	if (rt->rt_if_ref_fn == NULL)
		return;

	rt->rt_if_ref_fn(rt->rt_ifp, -1);
	rt->rt_flags &= ~RTF_IFREF;
	rt->rt_if_ref_fn = NULL;
}
/*
 * Initialize the mutex embedded in a route entry.
 */
static void
rte_lock_init(struct rtentry *rt)
{
	lck_mtx_init(&rt->rt_lock, rte_mtx_grp, rte_mtx_attr);
}
/*
 * Tear down a route entry's embedded mutex; the lock must not be held
 * at this point (the entry is presumably about to be freed).
 */
static void
rte_lock_destroy(struct rtentry *rt)
{
	RT_LOCK_ASSERT_NOTHELD(rt);
	lck_mtx_destroy(&rt->rt_lock, rte_mtx_grp);
}
/*
 * Acquire a route entry's mutex — as a spin acquisition when the caller
 * will hold it only briefly (spin == TRUE), otherwise as a regular
 * blocking acquisition.  Recursive acquisition is a bug, hence the
 * not-held assertion.  With RTD_DEBUG set, the acquisition is logged in
 * the entry's debug history after the lock is taken.
 */
void
rt_lock(struct rtentry *rt, boolean_t spin)
{
	RT_LOCK_ASSERT_NOTHELD(rt);
	if (spin)
		lck_mtx_lock_spin(&rt->rt_lock);
	else
		lck_mtx_lock(&rt->rt_lock);
	if (rte_debug & RTD_DEBUG)
		rte_lock_debug((struct rtentry_dbg *)rt);
}
/*
 * Release a route entry's mutex.  With RTD_DEBUG set, the release is
 * logged in the entry's debug history before the lock is dropped (i.e.
 * while it is still safely held).
 */
void
rt_unlock(struct rtentry *rt)
{
	RT_LOCK_ASSERT_HELD(rt);
	if (rte_debug & RTD_DEBUG)
		rte_unlock_debug((struct rtentry_dbg *)rt);
	lck_mtx_unlock(&rt->rt_lock);
}
/*
 * Record a lock acquisition in the entry's debug history ring.  The
 * counter is bumped atomically and the returned count (presumably the
 * pre-add value, per the _ov suffix — confirm against the primitive)
 * picks the ring slot; a backtrace is captured only when RTD_TRACE is
 * also enabled.
 */
static inline void
rte_lock_debug(struct rtentry_dbg *rte)
{
	uint32_t idx;

	idx = atomic_add_32_ov(&rte->rtd_lock_cnt, 1) % CTRACE_HIST_SIZE;
	if (rte_debug & RTD_TRACE)
		ctrace_record(&rte->rtd_lock[idx]);
}
/*
 * Record a lock release in the entry's debug history ring; mirrors
 * rte_lock_debug() but uses the unlock counter and ring.
 */
static inline void
rte_unlock_debug(struct rtentry_dbg *rte)
{
	uint32_t idx;

	idx = atomic_add_32_ov(&rte->rtd_unlock_cnt, 1) % CTRACE_HIST_SIZE;
	if (rte_debug & RTD_TRACE)
		ctrace_record(&rte->rtd_unlock[idx]);
}
/*
 * Allocate a route entry from the rtentry zone, dispatching to the
 * debug allocator when RTD_DEBUG is enabled.  Returns NULL on failure.
 */
static struct rtentry *
rte_alloc(void)
{
	struct rtentry *entry;

	if (rte_debug & RTD_DEBUG)
		entry = rte_alloc_debug();
	else
		entry = (struct rtentry *)zalloc(rte_zone);

	return (entry);
}
/*
 * Return a route entry to the rtentry zone, dispatching to the debug
 * variant when RTD_DEBUG is enabled.  Freeing an entry that still has
 * references is a fatal bug.
 */
static void
rte_free(struct rtentry *p)
{
	if (rte_debug & RTD_DEBUG) {
		rte_free_debug(p);
	} else {
		if (p->rt_refcnt != 0)
			panic("rte_free: rte=%p refcnt=%d non-zero\n",
			    p, p->rt_refcnt);
		zfree(rte_zone, p);
	}
}
/*
 * Atomically adjust an interface's route reference count by cnt, which
 * must be exactly +1 or -1.  When the count drops back to zero and the
 * interface has requested idle notifications (IFRF_IDLE_NOTIFY), post a
 * KEV_DL_IF_IDLE_ROUTE_REFCNT kernel event so listeners learn that the
 * interface no longer holds any route references.
 */
static void
rte_if_ref(struct ifnet *ifp, int cnt)
{
	struct kev_msg ev_msg;
	struct net_event_data ev_data;
	uint32_t old;

	/* Only single increments/decrements are legal. */
	if (cnt < -1 || cnt > 1)
		panic("%s: invalid count argument (%d)", __func__, cnt);

	old = atomic_add_32_ov(&ifp->if_route_refcnt, cnt);
	/* Decrementing past zero means a refcount bug somewhere. */
	if (cnt < 0 && old == 0)
		panic("%s: ifp=%p negative route refcnt!", __func__, ifp);

	/*
	 * old appears to be the pre-add value (per the _ov suffix and the
	 * underflow check above), so cnt < 0 && old == 1 means the count
	 * just transitioned 1 -> 0: the interface is now route-idle.
	 */
	if ((ifp->if_idle_flags & IFRF_IDLE_NOTIFY) && cnt < 0 && old == 1) {
		bzero(&ev_msg, sizeof (ev_msg));
		bzero(&ev_data, sizeof (ev_data));
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_DL_SUBCLASS;
		ev_msg.event_code = KEV_DL_IF_IDLE_ROUTE_REFCNT;
		strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
		ev_data.if_family = ifp->if_family;
		ev_data.if_unit = ifp->if_unit;
		ev_msg.dv[0].data_length = sizeof (struct net_event_data);
		ev_msg.dv[0].data_ptr = &ev_data;
		kev_post_msg(&ev_msg);
	}
}
/*
 * Debug variant of rte_alloc(): zero-fills the enlarged rtentry_dbg
 * record, optionally captures an allocation backtrace (RTD_TRACE), and
 * stamps the entry with the RTD_INUSE life-cycle marker so double-free
 * and corruption can be detected later.  Returns NULL on failure.
 */
static inline struct rtentry *
rte_alloc_debug(void)
{
	struct rtentry_dbg *rte;

	rte = (struct rtentry_dbg *)zalloc(rte_zone);
	if (rte == NULL)
		return (NULL);

	bzero(rte, sizeof (*rte));
	if (rte_debug & RTD_TRACE)
		ctrace_record(&rte->rtd_alloc);
	rte->rtd_inuse = RTD_INUSE;

	return ((struct rtentry *)rte);
}
/*
 * Debug variant of rte_free(): validates the entry's life-cycle marker
 * (catching double frees and corruption), snapshots the entry for
 * post-mortem inspection, scrubs the live copy, and — unless
 * RTD_NO_FREE is set, in which case the memory is deliberately leaked
 * so stale references trip over RTD_FREED — returns it to the zone.
 */
static inline void
rte_free_debug(struct rtentry *p)
{
	struct rtentry_dbg *rte = (struct rtentry_dbg *)p;

	if (p->rt_refcnt != 0)
		panic("rte_free: rte=%p refcnt=%d\n", p, p->rt_refcnt);
	if (rte->rtd_inuse == RTD_FREED)
		panic("rte_free: double free rte=%p\n", rte);
	else if (rte->rtd_inuse != RTD_INUSE)
		panic("rte_free: corrupted rte=%p\n", rte);

	/* Preserve the entry as it looked at free time... */
	bcopy((caddr_t)p, (caddr_t)&rte->rtd_entry_saved, sizeof (*p));
	/* ...then scrub the live copy up to (not including) rt_lock. */
	bzero((caddr_t)p, offsetof(struct rtentry, rt_lock));

	rte->rtd_inuse = RTD_FREED;

	if (rte_debug & RTD_TRACE)
		ctrace_record(&rte->rtd_free);

	if (!(rte_debug & RTD_NO_FREE))
		zfree(rte_zone, p);
}
/*
 * Capture the current thread and a kernel backtrace (up to
 * CTRACE_STACK_SIZE frames) into tr, for the route debugging history
 * rings.  The pc array is zeroed first so short backtraces leave no
 * stale frames behind.
 */
void
ctrace_record(ctrace_t *tr)
{
	tr->th = current_thread();
	bzero(tr->pc, sizeof (tr->pc));
	(void) OSBacktrace(tr->pc, CTRACE_STACK_SIZE);
}
/*
 * Copy a cached route from src to dst ('length' bytes each).  Since the
 * copy duplicates the ro_rt pointer, an extra reference is taken on the
 * rtentry so both copies own one.
 */
__private_extern__ void
route_copyout(struct route *dst, const struct route *src, size_t length)
{
	/* Struct copy first, then account for the duplicated pointer. */
	bcopy(src, dst, length);

	if (dst->ro_rt != NULL)
		RT_ADDREF(dst->ro_rt);
}
/*
 * Move a cached route from src to dst ('length' bytes each), fixing up
 * rtentry references so none is leaked or dropped twice.  On return src
 * no longer owns a route (src->ro_rt == NULL):
 *  - dst had no route: take over src wholesale (the reference moves).
 *  - both cached the same rtentry: keep dst's copy, adopt src's flags,
 *    and drop src's now-redundant reference.
 *  - different rtentries: release dst's old route and take over src's.
 *  - src had no route but dst does: dst is left untouched.
 */
__private_extern__ void
route_copyin(
	struct route *src,
	struct route *dst,
	size_t length)
{
	if (dst->ro_rt == NULL) {
		/* Copy everything; src's reference simply moves to dst. */
		bcopy(src, dst, length);
	} else if (src->ro_rt != NULL) {
		if (dst->ro_rt == src->ro_rt) {
			/* Same entry: keep dst, take flags, drop dup ref. */
			dst->ro_flags = src->ro_flags;
			rtfree(src->ro_rt);
		} else {
			/* Different entries: replace dst's with src's. */
			rtfree(dst->ro_rt);
			bcopy(src, dst, length);
		}
	}
	/* src's cached route has been consumed in every case. */
	src->ro_rt = NULL;
}