ixgbe: Style pass on FreeBSD part of driver

Fix up some indentation and reflow long lines

Sponsored by:	BBOX.io

(cherry picked from commit c58d34dd67a419866ee50f152044e49cecbae261)
This commit is contained in:
Kevin Bowling 2024-11-24 00:18:33 -07:00
parent bfabd58a9b
commit 55ddb8f7f4
7 changed files with 765 additions and 610 deletions

View file

@ -1,4 +1,4 @@
/******************************************************************************
/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
*****************************************************************************/
#include "ixgbe.h"
@ -114,11 +114,11 @@ ixgbe_get_bypass_time(u32 *year, u32 *sec)
static int
ixgbe_bp_version(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int version = 0;
u32 cmd;
int error = 0;
static int version = 0;
u32 cmd;
ixgbe_bypass_mutex_enter(sc);
cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
@ -154,15 +154,14 @@ err:
static int
ixgbe_bp_set_state(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int state = 0;
int error = 0;
static int state = 0;
/* Get the current state */
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw,
BYPASS_PAGE_CTL0, &state);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &state);
ixgbe_bypass_mutex_clear(sc);
if (error != 0)
return (error);
@ -216,10 +215,10 @@ out:
static int
ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int timeout = 0;
int error = 0;
static int timeout = 0;
/* Get the current value */
ixgbe_bypass_mutex_enter(sc);
@ -259,10 +258,10 @@ ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int main_on = 0;
int error = 0;
static int main_on = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_on);
@ -301,10 +300,10 @@ ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int main_off = 0;
int error = 0;
static int main_off = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_off);
@ -343,10 +342,10 @@ ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int aux_on = 0;
int error = 0;
static int aux_on = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_on);
@ -385,10 +384,10 @@ ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
static int aux_off = 0;
int error = 0;
static int aux_off = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_off);
@ -432,11 +431,11 @@ ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
int error, tmp;
static int timeout = 0;
u32 mask, arg;
int error, tmp;
static int timeout = 0;
u32 mask, arg;
/* Get the current hardware value */
ixgbe_bypass_mutex_enter(sc);
@ -503,11 +502,11 @@ ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
u32 sec, year;
int cmd, count = 0, error = 0;
int reset_wd = 0;
u32 sec, year;
int cmd, count = 0, error = 0;
int reset_wd = 0;
error = sysctl_handle_int(oidp, &reset_wd, 0, req);
if ((error) || (req->newptr == NULL))
@ -549,14 +548,14 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
u32 cmd, base, head;
u32 log_off, count = 0;
static int status = 0;
u8 data;
struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
u32 cmd, base, head;
u32 log_off, count = 0;
static int status = 0;
u8 data;
struct ixgbe_bypass_eeprom eeprom[BYPASS_MAX_LOGS];
int i, error = 0;
int i, error = 0;
error = sysctl_handle_int(oidp, &status, 0, req);
if ((error) || (req->newptr == NULL))
@ -639,12 +638,15 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
BYPASS_LOG_EVENT_SHIFT;
u8 action = eeprom[count].actions & BYPASS_LOG_ACTION_M;
u16 day_mon[2][13] = {
{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
{0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304,
334, 365},
{0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305,
335, 366}
};
char *event_str[] = {"unknown", "main on", "aux on",
"main off", "aux off", "WDT", "user" };
char *action_str[] = {"ignore", "normal", "bypass", "isolate",};
char *action_str[] =
{"ignore", "normal", "bypass", "isolate",};
/* verify valid data 1 - 6 */
if (event < BYPASS_EVENT_MAIN_ON || event > BYPASS_EVENT_USR)
@ -711,11 +713,11 @@ unlock_err:
void
ixgbe_bypass_init(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sysctl_oid *bp_node;
struct ixgbe_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sysctl_oid *bp_node;
struct sysctl_oid_list *bp_list;
u32 mask, value, sec, year;
u32 mask, value, sec, year;
if (!(sc->feat_cap & IXGBE_FEATURE_BYPASS))
return;
@ -723,13 +725,13 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
/* First set up time for the hardware */
ixgbe_get_bypass_time(&year, &sec);
mask = BYPASS_CTL1_TIME_M
| BYPASS_CTL1_VALID_M
| BYPASS_CTL1_OFFTRST_M;
mask = BYPASS_CTL1_TIME_M |
BYPASS_CTL1_VALID_M |
BYPASS_CTL1_OFFTRST_M;
value = (sec & BYPASS_CTL1_TIME_M)
| BYPASS_CTL1_VALID
| BYPASS_CTL1_OFFTRST;
value = (sec & BYPASS_CTL1_TIME_M) |
BYPASS_CTL1_VALID |
BYPASS_CTL1_OFFTRST;
ixgbe_bypass_mutex_enter(sc);
hw->mac.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);

View file

@ -1,4 +1,4 @@
/******************************************************************************
/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
*****************************************************************************/
#include "ixgbe.h"
@ -51,9 +51,9 @@ ixgbe_init_fdir(struct ixgbe_softc *sc)
void
ixgbe_reinit_fdir(void *context)
{
if_ctx_t ctx = context;
if_ctx_t ctx = context;
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
if_t ifp = iflib_get_ifp(ctx);
if (!(sc->feat_en & IXGBE_FEATURE_FDIR))
return;
@ -79,16 +79,16 @@ ixgbe_reinit_fdir(void *context)
void
ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
{
struct ixgbe_softc *sc = txr->sc;
struct ix_queue *que;
struct ip *ip;
struct tcphdr *th;
struct udphdr *uh;
struct ether_vlan_header *eh;
struct ixgbe_softc *sc = txr->sc;
struct ix_queue *que;
struct ip *ip;
struct tcphdr *th;
struct udphdr *uh;
struct ether_vlan_header *eh;
union ixgbe_atr_hash_dword input = {.dword = 0};
union ixgbe_atr_hash_dword common = {.dword = 0};
int ehdrlen, ip_hlen;
u16 etype;
int ehdrlen, ip_hlen;
u16 etype;
eh = mtod(mp, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
/******************************************************************************
/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
*****************************************************************************/
#include "opt_inet.h"
@ -58,13 +58,18 @@ static const char ixv_driver_version[] = "2.0.1-k";
************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF,
"Intel(R) X520 82599 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF,
"Intel(R) X540 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF,
"Intel(R) X550 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF,
"Intel(R) X552 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
"Intel(R) X553 Virtual Function"),
/* required last entry */
PVID_END
PVID_END
};
/************************************************************************
@ -76,8 +81,10 @@ static int ixv_if_attach_post(if_ctx_t);
static int ixv_if_detach(if_ctx_t);
static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
int);
static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
@ -239,17 +246,17 @@ ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int ntxqs, int ntxqsets)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = sc->shared;
if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
int i, j, error;
int i, j, error;
MPASS(sc->num_tx_queues == ntxqsets);
MPASS(ntxqs == 1);
/* Allocate queue structure memory */
sc->tx_queues =
(struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
(struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->tx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@ -263,13 +270,14 @@ ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
txr->sc = que->sc = sc;
/* Allocate report status array */
if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
/* get the virtual and physical address of the hardware queues */
/* get virtual and physical address of the hardware queues */
txr->tail = IXGBE_VFTDT(txr->me);
txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
@ -299,15 +307,15 @@ ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que;
int i, error;
int i, error;
MPASS(sc->num_rx_queues == nrxqsets);
MPASS(nrxqs == 1);
/* Allocate queue structure memory */
sc->rx_queues =
(struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
(struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->rx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@ -348,7 +356,7 @@ ixv_if_queues_free(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_tx_queue *que = sc->tx_queues;
int i;
int i;
if (que == NULL)
goto free;
@ -382,11 +390,11 @@ free:
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
struct ixgbe_softc *sc;
device_t dev;
if_softc_ctx_t scctx;
struct ixgbe_softc *sc;
device_t dev;
if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
int error = 0;
int error = 0;
INIT_DEBUGOUT("ixv_attach: begin");
@ -458,7 +466,7 @@ ixv_if_attach_pre(if_ctx_t ctx)
/* Check if VF was disabled by PF */
error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
if (error) {
/* PF is not capable of controlling VF state. Enable the link. */
/* PF is not capable of controlling VF state. Enable link. */
sc->link_enabled = true;
}
@ -522,8 +530,8 @@ static int
ixv_if_attach_post(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
int error = 0;
device_t dev = iflib_get_dev(ctx);
int error = 0;
/* Setup OS specific network interface */
error = ixv_setup_interface(ctx);
@ -568,7 +576,7 @@ ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
int error = 0;
int error = 0;
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
@ -596,9 +604,9 @@ ixv_if_init(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
device_t dev = iflib_get_dev(ctx);
device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
int error = 0;
int error = 0;
INIT_DEBUGOUT("ixv_if_init: begin");
hw->adapter_stopped = false;
@ -670,8 +678,8 @@ static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
u32 queue = 1 << vector;
u32 mask;
u32 queue = 1 << vector;
u32 mask;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@ -684,8 +692,8 @@ static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
u64 queue = (u64)(1 << vector);
u32 mask;
u64 queue = (u64)(1 << vector);
u32 mask;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
@ -699,7 +707,7 @@ static int
ixv_msix_que(void *arg)
{
struct ix_rx_queue *que = arg;
struct ixgbe_softc *sc = que->sc;
struct ixgbe_softc *sc = que->sc;
ixv_disable_queue(sc, que->msix);
++que->irqs;
@ -713,9 +721,9 @@ ixv_msix_que(void *arg)
static int
ixv_msix_mbx(void *arg)
{
struct ixgbe_softc *sc = arg;
struct ixgbe_softc *sc = arg;
struct ixgbe_hw *hw = &sc->hw;
u32 reg;
u32 reg;
++sc->link_irq;
@ -811,11 +819,13 @@ static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
int mbx_api[] = { ixgbe_mbox_api_12,
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
int i = 0;
int mbx_api[] = {
ixgbe_mbox_api_12,
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown
};
int i = 0;
while (mbx_api[i] != ixgbe_mbox_api_unknown) {
if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
@ -830,7 +840,8 @@ ixv_negotiate_api(struct ixgbe_softc *sc)
static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
bcopy(LLADDR(addr), &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
bcopy(LLADDR(addr),
&((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
return (++cnt);
@ -844,11 +855,11 @@ ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
static void
ixv_if_multi_set(if_ctx_t ctx)
{
u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
struct ixgbe_softc *sc = iflib_get_softc(ctx);
u8 *update_ptr;
if_t ifp = iflib_get_ifp(ctx);
int mcnt = 0;
u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
struct ixgbe_softc *sc = iflib_get_softc(ctx);
u8 *update_ptr;
if_t ifp = iflib_get_ifp(ctx);
int mcnt = 0;
IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
@ -908,8 +919,8 @@ static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
s32 status;
device_t dev = iflib_get_dev(ctx);
s32 status;
sc->hw.mac.get_link_status = true;
@ -955,7 +966,7 @@ ixv_if_update_admin_status(if_ctx_t ctx)
static void
ixv_if_stop(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
INIT_DEBUGOUT("ixv_stop: begin\n");
@ -981,8 +992,8 @@ ixv_if_stop(if_ctx_t ctx)
static void
ixv_identify_hardware(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
/* Save off the information about this board */
@ -1023,22 +1034,24 @@ static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
device_t dev = iflib_get_dev(ctx);
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que;
int error, rid, vector = 0;
char buf[16];
int error, rid, vector = 0;
char buf[16];
for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me,
buf);
if (error) {
device_printf(iflib_get_dev(ctx),
"Failed to allocate que int %d err: %d", i, error);
"Failed to allocate que int %d err: %d",
i, error);
sc->num_rx_queues = i + 1;
goto fail;
}
@ -1074,7 +1087,8 @@ ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
int msix_ctrl;
if (pci_find_cap(dev, PCIY_MSIX, &rid)) {
device_printf(dev, "Finding MSIX capability failed\n");
device_printf(dev,
"Finding MSIX capability failed\n");
} else {
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
@ -1101,21 +1115,21 @@ static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
int rid;
device_t dev = iflib_get_dev(ctx);
int rid;
rid = PCIR_BAR(0);
sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!(sc->pci_mem)) {
device_printf(dev, "Unable to allocate bus resource: memory\n");
device_printf(dev,
"Unable to allocate bus resource: memory\n");
return (ENXIO);
}
sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
sc->osdep.mem_bus_space_handle =
rman_get_bushandle(sc->pci_mem);
sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
return (0);
@ -1129,7 +1143,7 @@ ixv_free_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = sc->rx_queues;
device_t dev = iflib_get_dev(ctx);
device_t dev = iflib_get_dev(ctx);
/* Release all MSI-X queue resources */
if (sc->intr_type == IFLIB_INTR_MSIX)
@ -1156,7 +1170,7 @@ ixv_setup_interface(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = sc->shared;
if_t ifp = iflib_get_ifp(ctx);
if_t ifp = iflib_get_ifp(ctx);
INIT_DEBUGOUT("ixv_setup_interface: begin");
@ -1178,7 +1192,7 @@ static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_IPACKETS:
@ -1222,16 +1236,16 @@ static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
if_softc_ctx_t scctx = sc->shared;
struct ixgbe_hw *hw = &sc->hw;
if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = sc->tx_queues;
int i;
int i;
for (i = 0; i < sc->num_tx_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
u64 tdba = txr->tx_paddr;
u32 txctrl, txdctl;
int j = txr->me;
u64 tdba = txr->tx_paddr;
u32 txctrl, txdctl;
int j = txr->me;
/* Set WTHRESH to 8, burst writeback */
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
@ -1281,10 +1295,10 @@ static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
u32 reta = 0, mrqc, rss_key[10];
int queue_id;
int i, j;
u32 rss_hash_config;
u32 reta = 0, mrqc, rss_key[10];
int queue_id;
int i, j;
u32 rss_hash_config;
if (sc->feat_en & IXGBE_FEATURE_RSS) {
/* Fetch the configured RSS key */
@ -1351,18 +1365,21 @@ ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
__func__);
device_printf(sc->dev,
"%s: RSS_HASHTYPE_RSS_IPV6_EX defined,"
" but not supported\n", __func__);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
__func__);
device_printf(sc->dev,
"%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined,"
" but not supported\n", __func__);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
__func__);
device_printf(sc->dev,
"%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined,"
" but not supported\n", __func__);
IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
@ -1374,22 +1391,22 @@ static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx;
struct ixgbe_hw *hw = &sc->hw;
if_softc_ctx_t scctx;
struct ixgbe_hw *hw = &sc->hw;
#ifdef DEV_NETMAP
if_t ifp = iflib_get_ifp(ctx);
if_t ifp = iflib_get_ifp(ctx);
#endif
struct ix_rx_queue *que = sc->rx_queues;
u32 bufsz, psrtype;
u32 bufsz, psrtype;
bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
IXGBE_SRRCTL_BSIZEPKT_SHIFT;
psrtype = IXGBE_PSRTYPE_TCPHDR
| IXGBE_PSRTYPE_UDPHDR
| IXGBE_PSRTYPE_IPV4HDR
| IXGBE_PSRTYPE_IPV6HDR
| IXGBE_PSRTYPE_L2HDR;
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
if (sc->num_rx_queues > 1)
psrtype |= 1 << 29;
@ -1398,15 +1415,18 @@ ixv_initialize_receive_units(if_ctx_t ctx)
/* Tell PF our max_frame size */
if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
device_printf(sc->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
device_printf(sc->dev,
"There is a problem with the PF setup. It is likely the"
" receive unit for this VF will not function correctly."
"\n");
}
scctx = sc->shared;
for (int i = 0; i < sc->num_rx_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
u64 rdba = rxr->rx_paddr;
u32 reg, rxdctl;
int j = rxr->me;
u64 rdba = rxr->rx_paddr;
u32 reg, rxdctl;
int j = rxr->me;
/* Disable the queue */
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
@ -1497,10 +1517,10 @@ ixv_initialize_receive_units(if_ctx_t ctx)
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
if_t ifp = iflib_get_ifp(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
u32 ctrl, vid, vfta, retry;
u32 ctrl, vid, vfta, retry;
/*
* We get here thru if_init, meaning
@ -1571,7 +1591,7 @@ static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
u16 index, bit;
u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@ -1589,7 +1609,7 @@ static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
u16 index, bit;
u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@ -1603,10 +1623,10 @@ ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *que = sc->rx_queues;
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@ -1638,7 +1658,7 @@ ixv_if_disable_intr(if_ctx_t ctx)
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = &sc->rx_queues[rxqid];
ixv_enable_queue(sc, que->rxr.me);
@ -1658,7 +1678,7 @@ static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
struct ixgbe_hw *hw = &sc->hw;
u32 ivar, index;
u32 ivar, index;
vector |= IXGBE_IVAR_ALLOC_VAL;
@ -1808,18 +1828,18 @@ ixv_update_stats(struct ixgbe_softc *sc)
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
device_t dev = sc->dev;
struct ix_tx_queue *tx_que = sc->tx_queues;
struct ix_rx_queue *rx_que = sc->rx_queues;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
device_t dev = sc->dev;
struct ix_tx_queue *tx_que = sc->tx_queues;
struct ix_rx_queue *rx_que = sc->rx_queues;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct ixgbevf_hw_stats *stats = &sc->stats.vf;
struct sysctl_oid *stat_node, *queue_node;
struct sysctl_oid_list *stat_list, *queue_list;
struct sysctl_oid *stat_node, *queue_node;
struct sysctl_oid_list *stat_list, *queue_list;
#define QUEUE_NAME_LEN 32
char namebuf[QUEUE_NAME_LEN];
char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
@ -1922,9 +1942,9 @@ ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
sc->feat_cap = IXGBE_FEATURE_NETMAP
| IXGBE_FEATURE_VF
| IXGBE_FEATURE_LEGACY_TX;
sc->feat_cap = IXGBE_FEATURE_NETMAP |
IXGBE_FEATURE_VF |
IXGBE_FEATURE_LEGACY_TX;
/* A tad short on feature flags for VFs, atm. */
switch (sc->hw.mac.type) {

View file

@ -217,7 +217,7 @@ ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
uint16_t tag)
uint16_t tag)
{
struct ixgbe_hw *hw;
uint32_t vmolr, vmvir;
@ -269,7 +269,6 @@ ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
/*
* Frame size compatibility between PF and VF is only a problem on
* 82599-based cards. X540 and later support any combination of jumbo
@ -282,8 +281,8 @@ ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_UNKNOWN:
/*
* On legacy (1.0 and older) VF versions, we don't support jumbo
* frames on either the PF or the VF.
* On legacy (1.0 and older) VF versions, we don't support
* jumbo frames on either the PF or the VF.
*/
if (sc->max_frame_size > ETHER_MAX_LEN ||
vf->maximum_frame_size > ETHER_MAX_LEN)
@ -302,8 +301,8 @@ ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
return (true);
/*
* Jumbo frames only work with VFs if the PF is also using jumbo
* frames.
* Jumbo frames only work with VFs if the PF is also using
* jumbo frames.
*/
if (sc->max_frame_size <= ETHER_MAX_LEN)
return (true);
@ -526,7 +525,7 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
uint32_t *msg)
uint32_t *msg)
{
//XXX implement this
ixgbe_send_vf_failure(sc, vf, msg[0]);
@ -537,7 +536,6 @@ static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
uint32_t *msg)
{
switch (msg[1]) {
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_1_1:
@ -553,7 +551,8 @@ ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
uint32_t *msg)
{
struct ixgbe_hw *hw;
uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
@ -585,9 +584,9 @@ ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
#ifdef KTR
if_t ifp = iflib_get_ifp(ctx);
if_t ifp = iflib_get_ifp(ctx);
#endif
struct ixgbe_hw *hw;
uint32_t msg[IXGBE_VFMAILBOX_SIZE];
@ -639,13 +638,12 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
}
} /* ixgbe_process_vf_msg */
/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
if_ctx_t ctx = context;
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_ctx_t ctx = context;
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw;
struct ixgbe_vf *vf;
int i;
@ -656,13 +654,16 @@ ixgbe_handle_mbx(void *context)
vf = &sc->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE) {
if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
if (hw->mbx.ops[vf->pool].check_for_rst(hw,
vf->pool) == 0)
ixgbe_process_vf_reset(sc, vf);
if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
if (hw->mbx.ops[vf->pool].check_for_msg(hw,
vf->pool) == 0)
ixgbe_process_vf_msg(ctx, vf);
if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
if (hw->mbx.ops[vf->pool].check_for_ack(hw,
vf->pool) == 0)
ixgbe_process_vf_ack(sc, vf);
}
}
@ -799,27 +800,27 @@ ixgbe_initialize_iov(struct ixgbe_softc *sc)
/* RMW appropriate registers based on IOV mode */
/* Read... */
mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Modify... */
mrqc &= ~IXGBE_MRQC_MRQE_MASK;
mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
mrqc &= ~IXGBE_MRQC_MRQE_MASK;
mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
switch (sc->iov_mode) {
case IXGBE_64_VM:
mrqc |= IXGBE_MRQC_VMDQRSS64EN;
mtqc |= IXGBE_MTQC_64VF;
mrqc |= IXGBE_MRQC_VMDQRSS64EN;
mtqc |= IXGBE_MTQC_64VF;
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
gpie |= IXGBE_GPIE_VTMODE_64;
gpie |= IXGBE_GPIE_VTMODE_64;
break;
case IXGBE_32_VM:
mrqc |= IXGBE_MRQC_VMDQRSS32EN;
mtqc |= IXGBE_MTQC_32VF;
mrqc |= IXGBE_MRQC_VMDQRSS32EN;
mtqc |= IXGBE_MTQC_32VF;
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
gpie |= IXGBE_GPIE_VTMODE_32;
gpie |= IXGBE_GPIE_VTMODE_32;
break;
default:
panic("Unexpected SR-IOV mode %d", sc->iov_mode);

View file

@ -1,4 +1,4 @@
/******************************************************************************
/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
*****************************************************************************/
#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
@ -80,7 +80,7 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
uint32_t vlan_macip_lens, type_tucmd_mlhl;
uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
u8 ehdrlen;
u8 ehdrlen;
offload = true;
olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
@ -105,9 +105,12 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
/* First check if TSO is to be used */
if (pi->ipi_csum_flags & CSUM_TSO) {
/* This is used in the transmit desc in encap */
pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen -
pi->ipi_tcp_hlen;
mss_l4len_idx |=
(pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
mss_l4len_idx |=
(pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
}
olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
@ -126,7 +129,8 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
if (pi->ipi_csum_flags &
(CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
else
offload = false;
@ -168,17 +172,17 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
struct ixgbe_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
union ixgbe_adv_tx_desc *txd = NULL;
struct ixgbe_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
union ixgbe_adv_tx_desc *txd = NULL;
struct ixgbe_adv_tx_context_desc *TXD;
int i, j, first, pidx_last;
uint32_t olinfo_status, cmd, flags;
qidx_t ntxd;
int i, j, first, pidx_last;
uint32_t olinfo_status, cmd, flags;
qidx_t ntxd;
cmd = (IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
@ -249,9 +253,9 @@ ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
struct ixgbe_softc *sc = arg;
struct ixgbe_softc *sc = arg;
struct ix_tx_queue *que = &sc->tx_queues[txqid];
struct tx_ring *txr = &que->txr;
struct tx_ring *txr = &que->txr;
IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */
@ -263,14 +267,14 @@ static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct ixgbe_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = &sc->tx_queues[txqid];
struct tx_ring *txr = &que->txr;
qidx_t processed = 0;
int updated;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
uint8_t status;
struct tx_ring *txr = &que->txr;
qidx_t processed = 0;
int updated;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
uint8_t status;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx == txr->tx_rs_pidx)
@ -319,9 +323,9 @@ ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
struct rx_ring *rxr = &que->rxr;
struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
struct rx_ring *rxr = &que->rxr;
uint64_t *paddrs;
int i;
uint32_t next_pidx, pidx;
@ -342,11 +346,12 @@ ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
* ixgbe_isc_rxd_flush
************************************************************************/
static void
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused,
qidx_t pidx)
{
struct ixgbe_softc *sc = arg;
struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[qsidx];
struct rx_ring *rxr = &que->rxr;
struct rx_ring *rxr = &que->rxr;
IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */
@ -357,12 +362,12 @@ ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pi
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[qsidx];
struct rx_ring *rxr = &que->rxr;
struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[qsidx];
struct rx_ring *rxr = &que->rxr;
union ixgbe_adv_rx_desc *rxd;
uint32_t staterr;
int cnt, i, nrxd;
uint32_t staterr;
int cnt, i, nrxd;
nrxd = sc->shared->isc_nrxd[0];
for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
@ -391,16 +396,16 @@ ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct ixgbe_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
union ixgbe_adv_rx_desc *rxd;
struct ixgbe_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
union ixgbe_adv_rx_desc *rxd;
uint16_t pkt_info, len, cidx, i;
uint32_t ptype;
uint32_t staterr = 0;
bool eop;
uint16_t pkt_info, len, cidx, i;
uint32_t ptype;
uint32_t staterr = 0;
bool eop;
i = 0;
cidx = ri->iri_cidx;
@ -425,7 +430,8 @@ ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Make sure bad packets are discarded */
if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
if (sc->feat_en & IXGBE_FEATURE_VF)
if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS, 1);
if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS,
1);
rxr->rx_discarded++;
return (EBADMSG);
@ -478,7 +484,8 @@ ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
uint8_t errors = (uint8_t)(staterr >> 24);
/* If there is a layer 3 or 4 error we are done */
if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
if (__predict_false(errors &
(IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */
@ -492,7 +499,8 @@ ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
(ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
ri->iri_csum_flags |= CSUM_SCTP_VALID;
} else {
ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_flags |=
CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_data = htons(0xffff);
}
}

View file

@ -1,4 +1,4 @@
/******************************************************************************
/*****************************************************************************
SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2001-2017, Intel Corporation
@ -30,7 +30,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
*****************************************************************************/
#ifndef _IXGBE_H_
#define _IXGBE_H_
@ -435,8 +435,8 @@ struct ixgbe_softc {
struct ixgbe_bp_data bypass;
/* Firmware error check */
int recovery_mode;
struct callout fw_mode_timer;
int recovery_mode;
struct callout fw_mode_timer;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;