#ifndef _NET_CLASSQ_IF_CLASSQ_H_
#define _NET_CLASSQ_IF_CLASSQ_H_
#ifdef PRIVATE
#define IFCQ_SC_MAX 10
#ifdef BSD_KERNEL_PRIVATE
#include <net/classq/classq.h>
/*
 * Dequeue operation modifier passed to the scheduler's dequeue hooks:
 * either actually remove the packet, or just peek at the head.
 */
typedef enum cqdq_op {
CLASSQDQ_REMOVE = 1,	/* remove mbuf from the queue */
CLASSQDQ_POLL = 2,	/* don't remove; return head mbuf only */
} cqdq_op_t;
/*
 * Request codes passed to the scheduler's ifcq_request hook; the third
 * argument of the hook carries the matching cqrq_*_t payload (if any).
 */
typedef enum cqrq {
CLASSQRQ_PURGE = 1,	/* purge all packets; no payload */
CLASSQRQ_PURGE_SC = 2,	/* purge one service class; cqrq_purge_sc_t */
CLASSQRQ_EVENT = 3,	/* interface event; payload is a cqev_t value */
CLASSQRQ_THROTTLE = 4,	/* set/get throttle level; cqrq_throttle_t */
CLASSQRQ_STAT_SC = 5,	/* per-service-class queue stats; cqrq_stat_sc_t */
} cqrq_t;
/*
 * Payload for CLASSQRQ_PURGE_SC.  sc/flow are inputs selecting what to
 * purge; packets/bytes are filled in by the scheduler with the amount
 * actually purged (see IFCQ_PURGE_SC(), which reads them back).
 */
typedef struct cqrq_purge_sc {
mbuf_svc_class_t sc;	/* (in) service class to purge */
u_int32_t flow;	/* (in) flow hash; presumably 0 means all flows — TODO confirm */
u_int32_t packets;	/* (out) number of packets purged */
u_int32_t bytes;	/* (out) number of bytes purged */
} cqrq_purge_sc_t;
/*
 * Payload for CLASSQRQ_THROTTLE.  When set is nonzero the scheduler
 * applies `level'; when zero it reports the current level back via
 * `level' (see IFCQ_SET_THROTTLE()/IFCQ_GET_THROTTLE()).
 */
typedef struct cqrq_throttle {
u_int32_t set;	/* (in) 1 = set level, 0 = query current level */
u_int32_t level;	/* (in/out) throttling level (IFNET_THROTTLE_*) */
} cqrq_throttle_t;
/*
 * Payload for CLASSQRQ_STAT_SC.  sc is the input selector; the
 * scheduler fills in the current queue depth for that class.
 */
typedef struct cqrq_stat_sc {
mbuf_svc_class_t sc;	/* (in) service class of interest */
u_int32_t packets;	/* (out) packets currently queued */
u_int32_t bytes;	/* (out) bytes currently queued */
} cqrq_stat_sc_t;
#if PF_ALTQ
#include <net/altq/if_altq.h>
#endif
/*
 * Token-bucket regulator state, used to rate-limit dequeues from the
 * interface queue (see ifclassq_tbr_set()/ifclassq_tbr_dequeue()).
 * Field semantics follow the classic ALTQ token-bucket design; exact
 * fixed-point scaling is defined in the implementation file.
 */
struct tb_regulator {
u_int64_t tbr_rate_raw;	/* (unscaled) token fill rate as configured */
u_int32_t tbr_percent;	/* rate expressed as % of link rate, if used */
int64_t tbr_rate;	/* token fill rate (scaled; see implementation) */
int64_t tbr_depth;	/* bucket depth (scaled) */
int64_t tbr_token;	/* tokens currently available (scaled) */
int64_t tbr_filluptime;	/* time it takes to refill the bucket */
u_int64_t tbr_last;	/* timestamp of last token refill */
int tbr_lastop;	/* last dequeue operation (CLASSQDQ_*) */
};
/*
 * User-supplied token-bucket profile; converted into a struct
 * tb_regulator by ifclassq_tbr_set().
 */
struct tb_profile {
u_int64_t rate;	/* rate in bits/sec */
u_int32_t percent;	/* rate as a percentage of link rate (alternative) */
u_int32_t depth;	/* bucket depth in bytes */
};
/* Forward declarations for the scheduler hook signatures below. */
struct ifclassq;
enum cqdq_op;
enum cqrq;
/*
 * Scheduler hook types installed via ifclassq_attach():
 * enqueue one mbuf; dequeue (or poll) the next mbuf; dequeue (or poll)
 * the next mbuf of a given service class; and handle a control request.
 */
typedef int (*ifclassq_enq_func)(struct ifclassq *, struct mbuf *);
typedef struct mbuf *(*ifclassq_deq_func)(struct ifclassq *, enum cqdq_op);
typedef struct mbuf *(*ifclassq_deq_sc_func)(struct ifclassq *,
mbuf_svc_class_t, enum cqdq_op);
typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *);
/*
 * Per-interface transmit class queue.  Holds the aggregate queue
 * state, the attached scheduler's private state and hooks, and the
 * optional token-bucket regulator.  Protected by ifcq_lock.
 */
struct ifclassq {
decl_lck_mtx_data(, ifcq_lock);	/* mutex guarding this structure */
struct ifnet *ifcq_ifp;	/* back pointer to owning interface */
u_int32_t ifcq_len;	/* current aggregate queue length (packets) */
u_int32_t ifcq_maxlen;	/* maximum queue length (packets) */
struct pktcntr ifcq_xmitcnt;	/* transmitted packet/byte counters */
struct pktcntr ifcq_dropcnt;	/* dropped packet/byte counters */
u_int32_t ifcq_type;	/* attached scheduler type */
u_int32_t ifcq_flags;	/* IFCQF_* flags */
u_int32_t ifcq_sflags;	/* scheduler-specific flags */
void *ifcq_disc;	/* attached scheduler's private state */
/* cached per-service-class mapping to the scheduler's classes */
struct ifclassq_disc_slot {
u_int32_t qid;	/* scheduler queue id for this slot */
void *cl;	/* scheduler class handle for this slot */
} ifcq_disc_slots[IFCQ_SC_MAX];	/* one slot per service class */
ifclassq_enq_func ifcq_enqueue;	/* scheduler enqueue hook */
ifclassq_deq_func ifcq_dequeue;	/* scheduler dequeue hook */
ifclassq_deq_sc_func ifcq_dequeue_sc;	/* per-class dequeue hook */
ifclassq_req_func ifcq_request;	/* scheduler control-request hook */
struct tb_regulator ifcq_tbr;	/* token-bucket regulator state */
#if PF_ALTQ
u_int32_t ifcq_drain;	/* ALTQ drain count */
struct ifaltq ifcq_altq;	/* legacy ALTQ state */
#endif
};
#if PF_ALTQ
/* Accessors for the embedded legacy ALTQ state. */
#define IFCQ_ALTQ(_ifcq) (&(_ifcq)->ifcq_altq)
#define IFCQ_IS_DRAINING(_ifcq) ((_ifcq)->ifcq_drain > 0)
#endif
/* ifcq_flags bits */
#define IFCQF_READY 0x01	/* ifclassq supports discipline */
#define IFCQF_ENABLED 0x02	/* ifclassq is in use */
#define IFCQF_TBR 0x04	/* token-bucket regulator is in use */
#define IFCQ_IS_READY(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_READY)
#define IFCQ_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_ENABLED)
#define IFCQ_TBR_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_TBR)
/* Enqueue return values: negative means dropped, >= 0 means accepted. */
#define CLASSQEQ_DROPPED (-1)	/* packet dropped */
#define CLASSQEQ_SUCCESS 0	/* enqueued */
#define CLASSQEQ_SUCCESS_FC 1	/* enqueued; flow-control advisory */
#define CLASSQEQ_DROPPED_FC 2	/* dropped due to flow control */
#define CLASSQEQ_DROPPED_SP 3	/* dropped due to suspension */
/*
 * Interface events delivered to the scheduler via CLASSQRQ_EVENT
 * (see ifclassq_update() and ifclassq_ev2str()).
 */
typedef enum cqev {
CLASSQ_EV_LINK_BANDWIDTH = 1,	/* link bandwidth has changed */
CLASSQ_EV_LINK_LATENCY = 2,	/* link latency has changed */
CLASSQ_EV_LINK_MTU = 3,	/* link MTU has changed */
CLASSQ_EV_LINK_UP = 4,	/* link is now up */
CLASSQ_EV_LINK_DOWN = 5,	/* link is now down */
} cqev_t;
#endif
#include <net/pktsched/pktsched_priq.h>
#include <net/pktsched/pktsched_fairq.h>
#include <net/pktsched/pktsched_tcq.h>
#include <net/pktsched/pktsched_cbq.h>
#include <net/pktsched/pktsched_hfsc.h>
#include <net/pktsched/pktsched_qfq.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Exported per-queue statistics record (see ifclassq_getqstats()).
 * The anonymous union carries the scheduler-specific class stats,
 * discriminated by ifqs_scheduler.  Fixed 8-byte alignment keeps the
 * layout stable across user/kernel boundaries.
 */
struct if_ifclassq_stats {
u_int32_t ifqs_len;	/* current queue length (packets) */
u_int32_t ifqs_maxlen;	/* maximum queue length (packets) */
struct pktcntr ifqs_xmitcnt;	/* transmitted packet/byte counters */
struct pktcntr ifqs_dropcnt;	/* dropped packet/byte counters */
u_int32_t ifqs_scheduler;	/* scheduler type; selects union member */
union {
struct priq_classstats ifqs_priq_stats;
struct fairq_classstats ifqs_fairq_stats;
struct tcq_classstats ifqs_tcq_stats;
struct cbq_classstats ifqs_cbq_stats;
struct hfsc_classstats ifqs_hfsc_stats;
struct qfq_classstats ifqs_qfq_stats;
};
} __attribute__((aligned(8)));
#ifdef __cplusplus
}
#endif
#ifdef BSD_KERNEL_PRIVATE
/*
 * Locking helpers for ifcq_lock.  IFCQ_CONVERT_LOCK upgrades a lock
 * taken with IFCQ_LOCK_SPIN to a full (blocking) mutex hold; the lock
 * must already be held by the caller.
 */
#define IFCQ_LOCK_ASSERT_HELD(_ifcq) \
lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED)
#define IFCQ_LOCK_ASSERT_NOTHELD(_ifcq) \
lck_mtx_assert(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED)
#define IFCQ_LOCK(_ifcq) \
lck_mtx_lock(&(_ifcq)->ifcq_lock)
#define IFCQ_LOCK_SPIN(_ifcq) \
lck_mtx_lock_spin(&(_ifcq)->ifcq_lock)
#define IFCQ_CONVERT_LOCK(_ifcq) do { \
IFCQ_LOCK_ASSERT_HELD(_ifcq); \
lck_mtx_convert_spin(&(_ifcq)->ifcq_lock); \
} while (0)
#define IFCQ_UNLOCK(_ifcq) \
lck_mtx_unlock(&(_ifcq)->ifcq_lock)
/*
 * Enqueue/dequeue/poll wrappers around the attached scheduler's hooks.
 * The IFCQ_TBR_* variants go through the token-bucket regulator
 * instead of calling the scheduler hook directly; callers are expected
 * to pick the TBR variant when IFCQ_TBR_IS_ENABLED() is true.
 * POLL variants return the head packet without removing it.
 */
#define IFCQ_ENQUEUE(_ifq, _m, _err) do { \
(_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _m); \
} while (0)
#define IFCQ_DEQUEUE(_ifq, _m) do { \
(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_REMOVE); \
} while (0)
#define IFCQ_DEQUEUE_SC(_ifq, _sc, _m) do { \
(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_REMOVE); \
} while (0)
#define IFCQ_TBR_DEQUEUE(_ifcq, _m) do { \
(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_REMOVE); \
} while (0)
#define IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _m) do { \
(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_REMOVE, _sc); \
} while (0)
#define IFCQ_POLL(_ifq, _m) do { \
(_m) = (*(_ifq)->ifcq_dequeue)(_ifq, CLASSQDQ_POLL); \
} while (0)
#define IFCQ_POLL_SC(_ifq, _sc, _m) do { \
(_m) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, CLASSQDQ_POLL); \
} while (0)
#define IFCQ_TBR_POLL(_ifcq, _m) do { \
(_m) = ifclassq_tbr_dequeue(_ifcq, CLASSQDQ_POLL); \
} while (0)
#define IFCQ_TBR_POLL_SC(_ifcq, _sc, _m) do { \
(_m) = ifclassq_tbr_dequeue_sc(_ifcq, CLASSQDQ_POLL, _sc); \
} while (0)
/*
 * Control-request wrappers around ifcq_request.  Each builds the
 * appropriate cqrq_*_t payload on the stack, issues the request, and
 * copies any output fields back to the caller's variables.
 * NOTE(review): IFCQ_PURGE_SC passes packets/bytes as 0 and reads the
 * purged counts back from the request structure.
 */
#define IFCQ_PURGE(_ifq) do { \
(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL); \
} while (0)
#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do { \
cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 }; \
(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \
(_packets) = _req.packets; \
(_bytes) = _req.bytes; \
} while (0)
#define IFCQ_UPDATE(_ifq, _ev) do { \
(void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT, \
(void *)(_ev)); \
} while (0)
#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do { \
cqrq_throttle_t _req = { 1, _level }; \
(_err) = (*(_ifq)->ifcq_request) \
(_ifq, CLASSQRQ_THROTTLE, &_req); \
} while (0)
#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do { \
cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF }; \
(_err) = (*(_ifq)->ifcq_request) \
(_ifq, CLASSQRQ_THROTTLE, &_req); \
(_level) = _req.level; \
} while (0)
/*
 * Query the scheduler for the queue depth of one service class via a
 * CLASSQRQ_STAT_SC request.  On return, the packet/byte counts are
 * stored through (_packets)/(_bytes); either pointer may be NULL if
 * the caller is not interested in that value.
 *
 * Fix: the request hook was dereferenced through `ifq' instead of the
 * macro parameter `_ifq', which fails to compile at any use site (or,
 * worse, silently binds to an unrelated `ifq' in the caller's scope).
 */
#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do { \
cqrq_stat_sc_t _req = { _sc, 0, 0 }; \
(_err) = (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \
if ((_packets) != NULL) \
(*(_packets)) = _req.packets; \
if ((_bytes) != NULL) \
(*(_bytes)) = _req.bytes; \
} while (0)
/*
 * Aggregate queue-length accessors and counter updates.  Callers are
 * expected to hold ifcq_lock; these macros perform no locking of
 * their own.
 */
#define IFCQ_LEN(_ifcq) ((_ifcq)->ifcq_len)
#define IFCQ_QFULL(_ifcq) (IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen)
#define IFCQ_IS_EMPTY(_ifcq) (IFCQ_LEN(_ifcq) == 0)
#define IFCQ_INC_LEN(_ifcq) (IFCQ_LEN(_ifcq)++)
#define IFCQ_DEC_LEN(_ifcq) (IFCQ_LEN(_ifcq)--)
#define IFCQ_MAXLEN(_ifcq) ((_ifcq)->ifcq_maxlen)
#define IFCQ_SET_MAXLEN(_ifcq, _len) ((_ifcq)->ifcq_maxlen = (_len))
/* Accumulate transmit / drop packet+byte counters. */
#define IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do { \
PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len); \
} while (0)
#define IFCQ_DROP_ADD(_ifcq, _pkt, _len) do { \
PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len); \
} while (0)
/* Lifecycle: set up / tear down the interface queue and its scheduler. */
extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t);
extern void ifclassq_teardown(struct ifnet *);
extern int ifclassq_pktsched_setup(struct ifclassq *);
/* Queue-limit accessors and per-service-class length query. */
extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t);
extern u_int32_t ifclassq_get_maxlen(struct ifclassq *);
extern int ifclassq_get_len(struct ifclassq *, mbuf_svc_class_t,
u_int32_t *, u_int32_t *);
/*
 * Packet path.  The dequeue variants take a byte limit and return a
 * chain of mbufs via head/tail pointers plus packet and byte counts —
 * presumably (head, tail, cnt, len); verify against the definitions.
 */
extern errno_t ifclassq_enqueue(struct ifclassq *, struct mbuf *);
extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, struct mbuf **,
struct mbuf **, u_int32_t *, u_int32_t *);
extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t,
u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *);
extern struct mbuf *ifclassq_poll(struct ifclassq *);
extern struct mbuf *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t);
/* Event propagation and scheduler attach/detach. */
extern void ifclassq_update(struct ifclassq *, cqev_t);
extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *,
ifclassq_enq_func, ifclassq_deq_func, ifclassq_deq_sc_func,
ifclassq_req_func);
extern int ifclassq_detach(struct ifclassq *);
/* Statistics export and event-name helper. */
extern int ifclassq_getqstats(struct ifclassq *, u_int32_t,
void *, u_int32_t *);
extern const char *ifclassq_ev2str(cqev_t);
/* Token-bucket regulator: configure, and rate-limited dequeue paths. */
extern int ifclassq_tbr_set(struct ifclassq *, struct tb_profile *, boolean_t);
extern struct mbuf *ifclassq_tbr_dequeue(struct ifclassq *, int);
extern struct mbuf *ifclassq_tbr_dequeue_sc(struct ifclassq *, int,
mbuf_svc_class_t);
#endif
#endif
#endif