#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <sys/mcache.h>
#include <kern/locks.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_arp.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/nd6.h>
/* Converts a timeval into scheduler ticks; defined in the kern clock code. */
extern int tvtohz(struct timeval *);

/* Non-zero while the route-expiry timer is armed; guarded by rnh_lock. */
static int in_rtqtimo_run;

/* Periodic reaper for unreferenced cloned routes (see bottom of file). */
static void in_rtqtimo(void *);
/* Arms the reaper timer if it is not already pending. */
static void in_sched_rtqtimo(struct timeval *);

/* AF_INET-specific radix-tree hooks installed by in_inithead(). */
static struct radix_node *in_addroute(void *, void *, struct radix_node_head *,
struct radix_node *);
static struct radix_node *in_deleteroute(void *, void *,
struct radix_node_head *);
static struct radix_node *in_matroute(void *, struct radix_node_head *);
static struct radix_node *in_matroute_args(void *, struct radix_node_head *,
rn_matchf_t *f, void *);
static void in_clsroute(struct radix_node *, struct radix_node_head *);
static int in_rtqkill(struct radix_node *, void *);
static int in_ifadownkill(struct radix_node *, void *);
/*
 * AF_INET rnh_addaddr hook: massage an rtentry's flags and MTU before
 * inserting it into the radix tree, and recover from an insertion
 * failure caused by a conflicting cloned ARP host route by deleting
 * the ARP entry and retrying once.
 *
 * Called with rnh_lock held and the rtentry (treenodes) locked; returns
 * the inserted radix node, or NULL if insertion failed.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
	struct rtentry *rt = (struct rtentry *)treenodes;
	struct sockaddr_in *sin = (struct sockaddr_in *)(void *)rt_key(rt);
	struct radix_node *ret;
	char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
	uint32_t flags = rt->rt_flags;	/* snapshot for before/after logging */
	boolean_t verbose = (rt_verbose > 1);

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (verbose) {
		rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
	}

	/* Tag multicast destinations so lookups can special-case them. */
	if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
		rt->rt_flags |= RTF_MULTICAST;
	}

	/*
	 * Non-host, non-cloning, non-multicast (i.e. network) routes get
	 * protocol cloning so per-destination host routes are spawned on
	 * demand.
	 */
	if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
		rt->rt_flags |= RTF_PRCLONING;
	}

	/*
	 * For host routes, mark broadcast destinations RTF_BROADCAST, and
	 * routes whose destination equals the interface address RTF_LOCAL,
	 * so IP input/output can short-circuit appropriately.
	 */
	if (rt->rt_flags & RTF_HOST) {
		if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
			rt->rt_flags |= RTF_BROADCAST;
		} else {
			/* need full RT lock before taking the ifa lock */
			RT_CONVERT_LOCK(rt);
			IFA_LOCK_SPIN(rt->rt_ifa);
			if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
			    sin->sin_addr.s_addr) {
				rt->rt_flags |= RTF_LOCAL;
			}
			IFA_UNLOCK(rt->rt_ifa);
		}
	}

	/*
	 * Default the route MTU from the interface unless the metric is
	 * administratively locked; shrink it for CLAT46 translation
	 * overhead when the interface requires it.
	 */
	if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
	    rt->rt_ifp) {
		rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
		if (INTF_ADJUST_MTU_FOR_CLAT46(rt->rt_ifp)) {
			rt->rt_rmx.rmx_mtu = IN6_LINKMTU(rt->rt_ifp);
			rt->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
		}
	}

	ret = rn_addroute(v_arg, n_arg, head, treenodes);
	if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
		struct rtentry *rt2;
		/*
		 * Insertion of a host route collided with an existing
		 * entry; if that entry is a cloned ARP route
		 * (host + LLINFO with an AF_LINK gateway), delete it and
		 * retry the insertion once.
		 */
		rt2 = rtalloc1_scoped_locked(rt_key(rt), 0,
		    RTF_CLONING | RTF_PRCLONING, sin_get_ifscope(rt_key(rt)));
		if (rt2 != NULL) {
			char dbufc[MAX_IPv4_STR_LEN];

			RT_LOCK(rt2);
			if (verbose) {
				rt_str(rt2, dbufc, sizeof(dbufc), NULL, 0);
			}

			if ((rt2->rt_flags & RTF_LLINFO) &&
			    (rt2->rt_flags & RTF_HOST) &&
			    rt2->rt_gateway != NULL &&
			    rt2->rt_gateway->sa_family == AF_LINK) {
				if (verbose) {
					log(LOG_DEBUG, "%s: unable to insert "
					    "route to %s;%s, flags=%b, due to "
					    "existing ARP route %s->%s "
					    "flags=%b, attempting to delete\n",
					    __func__, dbuf,
					    (rt->rt_ifp != NULL) ?
					    rt->rt_ifp->if_xname : "",
					    rt->rt_flags, RTF_BITS, dbufc,
					    (rt2->rt_ifp != NULL) ?
					    rt2->rt_ifp->if_xname : "",
					    rt2->rt_flags, RTF_BITS);
				}
				/*
				 * Drop the RT lock before rtrequest_locked();
				 * rnh_lock keeps rt2 from going away.
				 */
				RT_UNLOCK(rt2);
				(void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
				    rt2->rt_gateway, rt_mask(rt2),
				    rt2->rt_flags, NULL);
				ret = rn_addroute(v_arg, n_arg, head,
				    treenodes);
			} else {
				RT_UNLOCK(rt2);
			}
			/* release the reference taken by rtalloc1 */
			rtfree_locked(rt2);
		}
	}

	if (!verbose) {
		goto done;
	}

	/* Verbose-only logging of the insertion outcome. */
	if (ret != NULL) {
		if (flags != rt->rt_flags) {
			log(LOG_DEBUG, "%s: route to %s->%s->%s inserted, "
			    "oflags=%b, flags=%b\n", __func__,
			    dbuf, gbuf, (rt->rt_ifp != NULL) ?
			    rt->rt_ifp->if_xname : "", flags, RTF_BITS,
			    rt->rt_flags, RTF_BITS);
		} else {
			log(LOG_DEBUG, "%s: route to %s->%s->%s inserted, "
			    "flags=%b\n", __func__, dbuf, gbuf,
			    (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
			    rt->rt_flags, RTF_BITS);
		}
	} else {
		log(LOG_DEBUG, "%s: unable to insert route to %s->%s->%s, "
		    "flags=%b, already exists\n", __func__, dbuf, gbuf,
		    (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
		    rt->rt_flags, RTF_BITS);
	}
done:
	return ret;
}
/*
 * AF_INET rnh_deladdr hook: plain radix-tree removal, with optional
 * debug logging of the deleted entry.  Called with rnh_lock held.
 */
static struct radix_node *
in_deleteroute(void *v_arg, void *netmask_arg, struct radix_node_head *head)
{
	struct radix_node *node;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	node = rn_delete(v_arg, netmask_arg, head);
	if (node == NULL || rt_verbose <= 1) {
		return node;
	}

	{
		char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
		struct rtentry *entry = (struct rtentry *)node;

		RT_LOCK(entry);
		rt_str(entry, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
		log(LOG_DEBUG, "%s: route to %s->%s->%s deleted, "
		    "flags=%b\n", __func__, dbuf, gbuf, (entry->rt_ifp != NULL) ?
		    entry->rt_ifp->if_xname : "", entry->rt_flags, RTF_BITS);
		RT_UNLOCK(entry);
	}
	return node;
}
/*
 * Re-activate a route found by a lookup: if it carries no references
 * and was scheduled for expiration (RTPRF_OURS), clear the mark and
 * cancel the expiration.  Called with the rtentry locked.
 */
struct radix_node *
in_validate(struct radix_node *rn)
{
	struct rtentry *route = (struct rtentry *)rn;

	RT_LOCK_ASSERT_HELD(route);

	/* Referenced routes are already live; nothing to do. */
	if (route->rt_refcnt != 0) {
		return rn;
	}

	if (rt_verbose > 2) {
		char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];

		rt_str(route, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
		log(LOG_DEBUG, "%s: route to %s->%s->%s validated, "
		    "flags=%b\n", __func__, dbuf, gbuf,
		    (route->rt_ifp != NULL) ? route->rt_ifp->if_xname : "",
		    route->rt_flags, RTF_BITS);
	}

	/* Rescue the route from the pending-expiry pool. */
	if (route->rt_flags & RTPRF_OURS) {
		route->rt_flags &= ~RTPRF_OURS;
		rt_setexpire(route, 0);
	}
	return rn;
}
/*
 * AF_INET rnh_matchaddr hook: a lookup without a caller-supplied
 * match filter.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
	struct radix_node *result;

	result = in_matroute_args(v_arg, head, NULL, NULL);
	return result;
}
/*
 * AF_INET rnh_matchaddr_args hook: radix-tree lookup with an optional
 * match filter; any hit is passed through in_validate() (under the RT
 * spin lock) to rescue it from pending expiration.
 */
static struct radix_node *
in_matroute_args(void *v_arg, struct radix_node_head *head,
    rn_matchf_t *f, void *w)
{
	struct radix_node *match;
	struct rtentry *route;

	match = rn_match_args(v_arg, head, f, w);
	if (match == NULL) {
		return NULL;
	}

	route = (struct rtentry *)match;
	RT_LOCK_SPIN(route);
	in_validate(match);
	RT_UNLOCK(route);
	return match;
}
/* Lifetime (seconds) granted to an unreferenced cloned route. */
static uint32_t rtq_reallyold = 60 * 60;	/* one hour */
SYSCTL_UINT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0,
    "Default expiration time on dynamically learned routes");

/* Floor below which in_rtqtimo() never auto-tunes rtq_reallyold. */
static uint32_t rtq_minreallyold = 10;
SYSCTL_UINT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

/* Cached-route count above which in_rtqtimo() starts decaying lifetimes. */
static uint32_t rtq_toomany = 128;
SYSCTL_UINT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache,
    CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0,
    "Upper limit on dynamically learned routes");
/*
 * AF_INET rnh_close hook, called when the last reference to a route is
 * dropped.  Cloned (or dynamic) host routes are either deleted on the
 * spot (RTF_DELCLONE set, or caching disabled via rtq_reallyold == 0)
 * or tagged RTPRF_OURS with an expiration time so in_rtqtimo() reaps
 * them later.
 *
 * Called with rnh_lock held and the rtentry locked; returns with the
 * rtentry still locked.
 *
 * Fix: rt_expire is 64-bit but was passed to a "%u" conversion, a
 * printf-style varargs type mismatch on LP64; cast to uint32_t.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
#pragma unused(head)
	char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
	struct rtentry *rt = (struct rtentry *)rn;
	boolean_t verbose = (rt_verbose > 1);

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	/* Only live, non-LLINFO host routes are candidates. */
	if (!(rt->rt_flags & RTF_UP)) {
		return;
	}
	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) {
		return;
	}
	/* Already scheduled for expiry; nothing more to do. */
	if (rt->rt_flags & RTPRF_OURS) {
		return;
	}
	/* Only routes created by cloning (or learned dynamically) qualify. */
	if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC))) {
		return;
	}

	if (verbose) {
		rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
	}

	if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
		int err;

		if (verbose) {
			log(LOG_DEBUG, "%s: deleting route to %s->%s->%s, "
			    "flags=%b\n", __func__, dbuf, gbuf,
			    (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
			    rt->rt_flags, RTF_BITS);
		}
		/*
		 * Delete immediately.  The RT lock must be dropped across
		 * rtrequest_locked(); rnh_lock keeps the entry alive.
		 */
		RT_UNLOCK(rt);
		err = rtrequest_locked(RTM_DELETE, rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt);
		if (err == 0) {
			/* Success: drop the reference rtrequest returned. */
			RT_LOCK(rt);
			RT_REMREF_LOCKED(rt);
		} else {
			RT_LOCK(rt);
			if (!verbose) {
				rt_str(rt, dbuf, sizeof(dbuf),
				    gbuf, sizeof(gbuf));
			}
			log(LOG_ERR, "%s: error deleting route to "
			    "%s->%s->%s, flags=%b, err=%d\n", __func__,
			    dbuf, gbuf, (rt->rt_ifp != NULL) ?
			    rt->rt_ifp->if_xname : "", rt->rt_flags,
			    RTF_BITS, err);
		}
	} else {
		uint64_t timenow;

		timenow = net_uptime();
		/* Park the route in the expiry pool for the reaper. */
		rt->rt_flags |= RTPRF_OURS;
		rt_setexpire(rt, timenow + rtq_reallyold);

		if (verbose) {
			/* cast: rt_expire is 64-bit, "%u" takes 32-bit */
			log(LOG_DEBUG, "%s: route to %s->%s->%s invalidated, "
			    "flags=%b, expire=T+%u\n", __func__, dbuf, gbuf,
			    (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
			    rt->rt_flags, RTF_BITS,
			    (uint32_t)(rt->rt_expire - timenow));
		}

		/* Make sure a reaper timer is pending. */
		in_sched_rtqtimo(NULL);
	}
}
/*
 * Walker state for in_rtqkill(): accumulates statistics over one pass
 * of the AF_INET radix tree and carries the earliest upcoming
 * expiration back to the caller.
 */
struct rtqk_arg {
	struct radix_node_head *rnh;	/* tree being walked */
	int updating;	/* clamp expirations to the (lowered) rtq_reallyold */
	int draining;	/* delete all RTPRF_OURS routes regardless of age */
	uint32_t killed;	/* number of routes deleted in this pass */
	uint32_t found;		/* number of RTPRF_OURS routes encountered */
	uint64_t nextstop;	/* earliest future expiration (net_uptime secs) */
};
/*
 * rnh_walktree callback for the route reaper: deletes RTPRF_OURS routes
 * whose expiration has passed (or unconditionally when draining), and
 * optionally clamps remaining expirations after rtq_reallyold has been
 * lowered.  Tracks found/killed counts and the earliest surviving
 * expiration in the rtqk_arg.  Called with rnh_lock held.
 *
 * Fix: two 64-bit expiration deltas were passed to "%u" conversions, a
 * printf-style varargs type mismatch on LP64; cast to uint32_t.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	boolean_t verbose = (rt_verbose > 1);
	uint64_t timenow;
	int err;

	timenow = net_uptime();
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK(rt);
	if (rt->rt_flags & RTPRF_OURS) {
		char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];

		if (verbose) {
			rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
		}

		ap->found++;
		/* rt_expire and rmx_expire must agree on zero vs non-zero */
		VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
		VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
		if (ap->draining || rt->rt_expire <= timenow) {
			/* an RTPRF_OURS route must be unreferenced */
			if (rt->rt_refcnt > 0) {
				panic("%s: route %p marked with RTPRF_OURS "
				    "with non-zero refcnt (%u)", __func__,
				    rt, rt->rt_refcnt);
			}
			if (verbose) {
				log(LOG_DEBUG, "%s: deleting route to "
				    "%s->%s->%s, flags=%b, draining=%d\n",
				    __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ?
				    rt->rt_ifp->if_xname : "", rt->rt_flags,
				    RTF_BITS, ap->draining);
			}
			/* hold a reference across the unlocked delete */
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
			err = rtrequest_locked(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
			if (err != 0) {
				RT_LOCK(rt);
				if (!verbose) {
					rt_str(rt, dbuf, sizeof(dbuf),
					    gbuf, sizeof(gbuf));
				}
				log(LOG_ERR, "%s: error deleting route to "
				    "%s->%s->%s, flags=%b, err=%d\n", __func__,
				    dbuf, gbuf, (rt->rt_ifp != NULL) ?
				    rt->rt_ifp->if_xname : "", rt->rt_flags,
				    RTF_BITS, err);
				RT_UNLOCK(rt);
			} else {
				ap->killed++;
			}
			/* drop the reference taken above */
			rtfree_locked(rt);
		} else {
			uint64_t expire = (rt->rt_expire - timenow);

			/* rtq_reallyold shrank: clamp this route's lifetime */
			if (ap->updating && expire > rtq_reallyold) {
				rt_setexpire(rt, timenow + rtq_reallyold);
				if (verbose) {
					/* casts: "%u" takes 32-bit args */
					log(LOG_DEBUG, "%s: route to "
					    "%s->%s->%s, flags=%b, adjusted "
					    "expire=T+%u (was T+%u)\n",
					    __func__, dbuf, gbuf,
					    (rt->rt_ifp != NULL) ?
					    rt->rt_ifp->if_xname : "",
					    rt->rt_flags, RTF_BITS,
					    (uint32_t)(rt->rt_expire - timenow),
					    (uint32_t)expire);
				}
			}
			ap->nextstop = lmin(ap->nextstop, rt->rt_expire);
			RT_UNLOCK(rt);
		}
	} else {
		RT_UNLOCK(rt);
	}
	return 0;
}
/*
 * Interval (seconds) between expiry sweeps of cached routes.
 * Parenthesized: the bare `60*10` expansion is an operator-precedence
 * hazard in any arithmetic context.
 */
#define RTQ_TIMEOUT	(60 * 10)	/* run no less than once every 10 min */
static int rtq_timeout = RTQ_TIMEOUT;
/*
 * Periodic reaper for unreferenced cloned routes.  Walks the AF_INET
 * tree deleting expired RTPRF_OURS entries; if "too many" survive,
 * rtq_reallyold is decayed by 1/3 (never below rtq_minreallyold, and
 * at most once per rtq_timeout seconds) and the tree is re-walked to
 * clamp outstanding expirations.  Re-arms itself only while RTPRF_OURS
 * routes remain.
 *
 * Fix: log format specifiers matched the wrong signedness — "%u" for
 * the signed int rtq_timeout and "%d" for the uint32_t rtq_reallyold;
 * swapped to "%d" and "%u" respectively.
 */
static void
in_rtqtimo(void *targ)
{
#pragma unused(targ)
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	struct timeval atv;
	static uint64_t last_adjusted_timeout = 0;	/* guarded by rnh_lock */
	boolean_t verbose = (rt_verbose > 1);
	uint64_t timenow;
	uint32_t ours;

	lck_mtx_lock(rnh_lock);
	rnh = rt_tables[AF_INET];
	VERIFY(rnh != NULL);

	timenow = net_uptime();
	if (verbose) {
		log(LOG_DEBUG, "%s: initial nextstop is T+%d seconds\n",
		    __func__, rtq_timeout);
	}

	/* First pass: reap expired routes, collect stats and next deadline. */
	bzero(&arg, sizeof(arg));
	arg.rnh = rnh;
	arg.nextstop = timenow + rtq_timeout;
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	if (verbose) {
		log(LOG_DEBUG, "%s: found %u, killed %u\n", __func__,
		    arg.found, arg.killed);
	}

	/*
	 * If too many routes are still sitting around taking up space,
	 * crank down the lifetime and re-walk to make more of them go
	 * away sooner.  Adjust at most once per rtq_timeout seconds so
	 * we never crank down too fast.
	 */
	ours = (arg.found - arg.killed);
	if (ours > rtq_toomany &&
	    ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout) &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2 * rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}
		last_adjusted_timeout = timenow;
		if (verbose) {
			log(LOG_DEBUG, "%s: adjusted rtq_reallyold to %u "
			    "seconds\n", __func__, rtq_reallyold);
		}
		/* Second pass: clamp survivors to the new lifetime. */
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - timenow;
	/* re-arm only if there are still routes left to watch over */
	in_rtqtimo_run = 0;
	if (ours > 0) {
		in_sched_rtqtimo(&atv);
	} else if (verbose) {
		log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
	}
	lck_mtx_unlock(rnh_lock);
}
static void
in_sched_rtqtimo(struct timeval *atv)
{
LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
if (!in_rtqtimo_run) {
struct timeval tv;
if (atv == NULL) {
tv.tv_usec = 0;
tv.tv_sec = MAX(rtq_timeout / 10, 1);
atv = &tv;
}
if (rt_verbose > 1) {
log(LOG_DEBUG, "%s: timer scheduled in "
"T+%llus.%lluu\n", __func__,
(uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
}
in_rtqtimo_run = 1;
timeout(in_rtqtimo, NULL, tvtohz(atv));
}
}
void
in_rtqdrain(void)
{
struct radix_node_head *rnh;
struct rtqk_arg arg;
if (rt_verbose > 1) {
log(LOG_DEBUG, "%s: draining routes\n", __func__);
}
lck_mtx_lock(rnh_lock);
rnh = rt_tables[AF_INET];
VERIFY(rnh != NULL);
bzero(&arg, sizeof(arg));
arg.rnh = rnh;
arg.draining = 1;
rnh->rnh_walktree(rnh, in_rtqkill, &arg);
lck_mtx_unlock(rnh_lock);
}
/*
 * Initialize an AF_INET radix tree.  For the main routing table
 * (rt_tables[AF_INET]) the generic tree operations are overridden with
 * the AF_INET-specific hooks defined above; auxiliary tables keep the
 * generic ones.  Returns 1 on success, 0 if rn_inithead() failed.
 */
int
in_inithead(void **head, int off)
{
	struct radix_node_head *tree;

	/* The main table must not be initialized twice. */
	VERIFY(head != (void **)&rt_tables[AF_INET] || *head == NULL);

	if (rn_inithead(head, off) == 0) {
		return 0;
	}

	if (head != (void **)&rt_tables[AF_INET]) {
		return 1;	/* only the main table gets custom hooks */
	}

	tree = *head;
	tree->rnh_addaddr = in_addroute;
	tree->rnh_deladdr = in_deleteroute;
	tree->rnh_matchaddr = in_matroute;
	tree->rnh_matchaddr_args = in_matroute_args;
	tree->rnh_close = in_clsroute;
	return 1;
}
/* Walker state for in_ifadownkill(). */
struct in_ifadown_arg {
	struct radix_node_head *rnh;	/* tree being walked */
	struct ifaddr *ifa;	/* address whose routes are being removed */
	int del;	/* non-zero: remove RTF_STATIC routes as well */
};
/*
 * rnh_walktree callback for in_ifadown(): deletes every route bound to
 * the given ifaddr (static routes only when ap->del is set).  Called
 * with rnh_lock held.  Always returns 0 so the walk continues.
 */
static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
	char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN];
	struct in_ifadown_arg *ap = xap;
	struct rtentry *rt = (struct rtentry *)rn;
	boolean_t verbose = (rt_verbose != 0);
	int err;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK(rt);
	if (rt->rt_ifa == ap->ifa &&
	    (ap->del || !(rt->rt_flags & RTF_STATIC))) {
		rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
		if (verbose) {
			log(LOG_DEBUG, "%s: deleting route to %s->%s->%s, "
			    "flags=%b\n", __func__, dbuf, gbuf,
			    (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "",
			    rt->rt_flags, RTF_BITS);
		}
		/* Take a reference so rt survives the unlocked delete. */
		RT_ADDREF_LOCKED(rt);
		/*
		 * Strip the cloning flags so the delete removes this
		 * entry itself rather than spawning/affecting clones.
		 */
		rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
		RT_UNLOCK(rt);
		/* rnh_lock still protects rt's key/gateway fields here. */
		err = rtrequest_locked(RTM_DELETE, rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
		if (err != 0) {
			RT_LOCK(rt);
			if (!verbose) {
				rt_str(rt, dbuf, sizeof(dbuf),
				    gbuf, sizeof(gbuf));
			}
			log(LOG_ERR, "%s: error deleting route to "
			    "%s->%s->%s, flags=%b, err=%d\n", __func__,
			    dbuf, gbuf, (rt->rt_ifp != NULL) ?
			    rt->rt_ifp->if_xname : "", rt->rt_flags,
			    RTF_BITS, err);
			RT_UNLOCK(rt);
		}
		/* Drop the reference taken above. */
		rtfree_locked(rt);
	} else {
		RT_UNLOCK(rt);
	}
	return 0;
}
int
in_ifadown(struct ifaddr *ifa, int delete)
{
struct in_ifadown_arg arg;
struct radix_node_head *rnh;
LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
if (ifa->ifa_addr->sa_family != AF_INET) {
return 1;
}
routegenid_inet_update();
arg.rnh = rnh = rt_tables[AF_INET];
arg.ifa = ifa;
arg.del = delete;
rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
IFA_LOCK_SPIN(ifa);
ifa->ifa_flags &= ~IFA_ROUTE;
IFA_UNLOCK(ifa);
return 0;
}