Update the Intel i40e drivers, ixl version 1.2.8, ixlv version 1.1.18

	- Improved VF stability, thanks to changes from Ryan Stone and Juniper
	- RSS fixes in the ixlv driver
	- link detection in the ixlv driver
	- New sysctls added in ixl and ixlv
	- reset timeout increased for ixlv
	- stability fixes in detach
	- correct media reporting
	- Coverity warnings fixed
	- Many small bug fixes
	- VF Makefile modified - nvm shared code needed
	- remove unused sleep channels in ixlv_sc struct

Submitted by: Eric Joyner (committed by jfv)
MFC after:	1 week
This commit is contained in:
Jack F Vogel 2014-11-06 23:45:05 +00:00
parent 2b293f6aa1
commit e5100ee278
9 changed files with 1070 additions and 594 deletions

View file

@ -107,6 +107,7 @@ i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
"error %u\n", err);
goto fail_2;
}
mem->nseg = 1;
mem->size = size;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

View file

@ -147,8 +147,7 @@ void prefetch(void *x)
#define prefetch(x)
#endif
struct i40e_osdep
{
struct i40e_osdep {
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;

View file

@ -40,7 +40,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixl_driver_version[] = "1.2.2";
char ixl_driver_version[] = "1.2.8";
/*********************************************************************
* PCI Device ID Table
@ -109,6 +109,7 @@ static bool ixl_config_link(struct i40e_hw *);
static void ixl_config_rss(struct ixl_vsi *);
static void ixl_set_queue_rx_itr(struct ixl_queue *);
static void ixl_set_queue_tx_itr(struct ixl_queue *);
static int ixl_set_advertised_speeds(struct ixl_pf *, int);
static void ixl_enable_rings(struct ixl_vsi *);
static void ixl_disable_rings(struct ixl_vsi *);
@ -155,6 +156,7 @@ static void ixl_do_adminq(void *, int);
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
/* Statistics */
static void ixl_add_hw_stats(struct ixl_pf *);
@ -176,7 +178,8 @@ static void ixl_stat_update32(struct i40e_hw *, u32, bool,
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif
@ -276,6 +279,7 @@ int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
static char *ixl_fc_string[6] = {
"None",
"Rx",
@ -398,6 +402,11 @@ ixl_attach(device_t dev)
OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_current_speed, "A", "Current Port Speed");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "rx_itr", CTLFLAG_RW,
@ -436,8 +445,13 @@ ixl_attach(device_t dev)
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");
OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@ -445,7 +459,7 @@ ixl_attach(device_t dev)
pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif
/* Save off the information about this board */
/* Save off the PCI information */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
@ -593,6 +607,7 @@ ixl_attach(device_t dev)
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
/* Set up VSI and queues */
if (ixl_setup_stations(pf) != 0) {
device_printf(dev, "setup stations failed!\n");
error = ENOMEM;
@ -630,8 +645,11 @@ ixl_attach(device_t dev)
"an unqualified module was detected\n");
/* Setup OS specific network interface */
if (ixl_setup_interface(dev, vsi) != 0)
if (ixl_setup_interface(dev, vsi) != 0) {
device_printf(dev, "interface setup failed!\n");
error = EIO;
goto err_late;
}
/* Get the bus configuration and set the shared code */
bus = ixl_get_bus_info(hw, dev);
@ -642,25 +660,32 @@ ixl_attach(device_t dev)
ixl_update_stats_counters(pf);
ixl_add_hw_stats(pf);
/* Reset port's advertised speeds */
if (!i40e_is_40G_device(hw->device_id)) {
pf->advertised_speed = 0x7;
ixl_set_advertised_speeds(pf, 0x7);
}
/* Register for VLAN events */
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
INIT_DEBUGOUT("ixl_attach: end");
return (0);
err_late:
ixl_free_vsi(vsi);
if (vsi->ifp != NULL)
if_free(vsi->ifp);
err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
err_get_cap:
i40e_shutdown_adminq(hw);
err_out:
if (vsi->ifp != NULL)
if_free(vsi->ifp);
ixl_free_pci_resources(pf);
ixl_free_vsi(vsi);
IXL_PF_LOCK_DESTROY(pf);
return (error);
}
@ -725,6 +750,7 @@ ixl_detach(device_t dev)
ether_ifdetach(vsi->ifp);
callout_drain(&pf->timer);
ixl_free_pci_resources(pf);
bus_generic_detach(dev);
if_free(vsi->ifp);
@ -2246,6 +2272,34 @@ early:
return;
}
/*
 * Register the ifmedia types supported by this port, based on the
 * PHY-type capability bits returned by the get-phy-abilities AQ command.
 */
static void
ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
{
	/*
	 * Map PHY capability bits to ifmedia words; an entry is added
	 * when any bit in its mask is set.  Entry order matches the
	 * original if-chain so the media list order is unchanged.
	 */
	static const struct {
		u32	mask;
		int	media;
	} phy_media_map[] = {
		{ 1 << I40E_PHY_TYPE_100BASE_TX,
		    IFM_ETHER | IFM_100_TX },
		{ 1 << I40E_PHY_TYPE_1000BASE_T,
		    IFM_ETHER | IFM_1000_T },
		{ (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) |
		    (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU),
		    IFM_ETHER | IFM_10G_TWINAX },
		{ 1 << I40E_PHY_TYPE_10GBASE_SR,
		    IFM_ETHER | IFM_10G_SR },
		{ 1 << I40E_PHY_TYPE_10GBASE_LR,
		    IFM_ETHER | IFM_10G_LR },
		{ 1 << I40E_PHY_TYPE_10GBASE_T,
		    IFM_ETHER | IFM_10G_T },
		{ (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) |
		    (1 << I40E_PHY_TYPE_40GBASE_CR4),
		    IFM_ETHER | IFM_40G_CR4 },
		{ 1 << I40E_PHY_TYPE_40GBASE_SR4,
		    IFM_ETHER | IFM_40G_SR4 },
		{ 1 << I40E_PHY_TYPE_40GBASE_LR4,
		    IFM_ETHER | IFM_40G_LR4 },
	};

	for (int i = 0;
	    i < (int)(sizeof(phy_media_map) / sizeof(phy_media_map[0])); i++) {
		if (phy_type & phy_media_map[i].mask)
			ifmedia_add(&vsi->media, phy_media_map[i].media,
			    0, NULL);
	}
}
/*********************************************************************
*
@ -2276,7 +2330,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = ixl_ioctl;
#if __FreeBSD_version >= 1100000
#if __FreeBSD_version >= 1100036
if_setgetcounterfn(ifp, ixl_get_counter);
#endif
@ -2286,8 +2340,6 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifp->if_snd.ifq_maxlen = que->num_desc - 2;
ether_ifattach(ifp, hw->mac.addr);
vsi->max_frame_size =
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
@ -2328,40 +2380,26 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ixl_media_status);
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
if (aq_error) {
printf("Error getting supported media types, AQ error %d\n", aq_error);
return (EPERM);
}
/* Display supported media types */
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
if (aq_error == I40E_ERR_UNKNOWN_PHY) {
/* Need delay to detect fiber correctly */
i40e_msec_delay(200);
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
if (aq_error == I40E_ERR_UNKNOWN_PHY)
device_printf(dev, "Unknown PHY type detected!\n");
else
ixl_add_ifmedia(vsi, abilities_resp.phy_type);
} else if (aq_error) {
device_printf(dev, "Error getting supported media types, err %d,"
" AQ error %d\n", aq_error, hw->aq.asq_last_status);
} else
ixl_add_ifmedia(vsi, abilities_resp.phy_type);
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
ether_ifattach(ifp, hw->mac.addr);
return (0);
}
@ -3728,10 +3766,6 @@ ixl_update_stats_counters(struct ixl_pf *pf)
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_discards,
&nsd->eth.tx_discards);
ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
@ -3915,8 +3949,8 @@ ixl_do_adminq(void *context, int pending)
u32 reg, loop = 0;
u16 opcode, result;
event.msg_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.msg_len,
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!event.msg_buf) {
printf("Unable to allocate adminq memory\n");
@ -4300,6 +4334,52 @@ ixl_current_speed(SYSCTL_HANDLER_ARGS)
return (error);
}
/*
** ixl_set_advertised_speeds
**
** Program the port's advertised link speeds via two admin-queue
** commands: first read the current PHY abilities, then write a new
** PHY config that copies those abilities and sets the requested
** speed bits.
**
** 'speeds' is a bitmask: 0x4 = 10Gb, 0x2 = 1Gb, 0x1 = 100Mb.
**
** Returns 0 on success, or EAGAIN if either AQ command fails
** (details are logged via device_printf).
*/
static int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
enum i40e_status_code aq_error = 0;
/* Get current capability information */
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
if (aq_error) {
device_printf(dev, "%s: Error getting phy capabilities %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EAGAIN);
}
/*
** Prepare new config: carry over the current abilities (plus the
** atomic-restart-link flag so the change takes effect immediately)
** and only alter the advertised link_speed bits below.
*/
bzero(&config, sizeof(config));
config.phy_type = abilities.phy_type;
config.abilities = abilities.abilities
| I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
/* Translate into aq cmd link_speed */
if (speeds & 0x4)
config.link_speed |= I40E_LINK_SPEED_10GB;
if (speeds & 0x2)
config.link_speed |= I40E_LINK_SPEED_1GB;
if (speeds & 0x1)
config.link_speed |= I40E_LINK_SPEED_100MB;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
if (aq_error) {
device_printf(dev, "%s: Error setting new phy config %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EAGAIN);
}
return (0);
}
/*
** Control link advertise speed:
** Flags:
@ -4315,10 +4395,7 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
int requested_ls = 0;
enum i40e_status_code aq_error = 0;
int error = 0;
/*
@ -4343,39 +4420,9 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
if (pf->advertised_speed == requested_ls)
return (0);
/* Get current capability information */
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
if (aq_error) {
device_printf(dev, "%s: Error getting phy capabilities %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EAGAIN);
}
/* Prepare new config */
bzero(&config, sizeof(config));
config.phy_type = abilities.phy_type;
config.abilities = abilities.abilities
| I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
/* Translate into aq cmd link_speed */
if (requested_ls & 0x4)
config.link_speed |= I40E_LINK_SPEED_10GB;
if (requested_ls & 0x2)
config.link_speed |= I40E_LINK_SPEED_1GB;
if (requested_ls & 0x1)
config.link_speed |= I40E_LINK_SPEED_100MB;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
if (aq_error) {
device_printf(dev, "%s: Error setting new phy config %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EAGAIN);
}
error = ixl_set_advertised_speeds(pf, requested_ls);
if (error)
return (error);
pf->advertised_speed = requested_ls;
ixl_update_link_status(pf);
@ -4454,6 +4501,26 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
return (link);
}
static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
char buf[32];
snprintf(buf, sizeof(buf),
"f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
IXL_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
IXL_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
#ifdef IXL_DEBUG
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
@ -4563,7 +4630,7 @@ ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
#define IXL_SW_RES_SIZE 0x14
static int
ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
@ -4620,7 +4687,120 @@ ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
device_printf(dev, "sysctl error: %d\n", error);
sbuf_delete(buf);
return error;
}
/*
** Translate a switch element SEID into a human-readable name.
** The caller owns the sbuf (init and delete); this routine clears it,
** writes the name, finishes it, and returns its data pointer.
*/
static char *
ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
{
	sbuf_clear(s);

	if (seid == 0)
		sbuf_cat(s, uplink ? "Network" : "Host");
	else if (seid == 1)
		sbuf_cat(s, "EMP");
	else if (seid <= 5)
		sbuf_printf(s, "MAC %d", seid - 2);
	else if (seid <= 15)
		sbuf_cat(s, "Reserved");
	else if (seid <= 31)
		sbuf_printf(s, "PF %d", seid - 16);
	else if (seid <= 159)
		sbuf_printf(s, "VF %d", seid - 32);
	else if (seid <= 287)
		sbuf_cat(s, "Reserved");
	else if (seid <= 511)
		sbuf_cat(s, "Other");	/* other switch structures */
	else if (seid <= 895)
		sbuf_printf(s, "VSI %d", seid - 512);
	else if (seid <= 1023)
		sbuf_cat(s, "Reserved");
	else
		sbuf_cat(s, "Invalid");

	sbuf_finish(s);
	return (sbuf_data(s));
}
/*
 * Sysctl handler that dumps the HW switch configuration: one row per
 * switch element with its SEID/name, uplink, downlink and connection
 * type, formatted as a table.
 *
 * Fix: the previous version leaked 'buf' (the sysctl sbuf) when the
 * name-scratch sbuf allocation failed; both error paths now release
 * every sbuf they own before returning.
 */
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	int error = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];
	u16 next = 0;
	struct i40e_aqc_get_switch_config_resp *sw_config;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	error = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (error) {
		device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return (error);
	}

	/* Scratch sbuf used by ixl_switch_element_string() for names. */
	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);	/* previously leaked on this path */
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	// Assuming <= 255 elements in switch
	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
	/* Exclude:
	** Revision -- all elements are revision 1 for now
	*/
	sbuf_printf(buf,
	    "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
	    " | | | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		// "%4d (%8s) | %8s %8s %#8x",
		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    sw_config->element[i].seid, false));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
		    sw_config->element[i].uplink_seid, true));
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
		    sw_config->element[i].downlink_seid, false));
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
		/* No trailing newline after the last row. */
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}
/*

File diff suppressed because it is too large Load diff

View file

@ -162,7 +162,9 @@
/*
** Default number of entries in Tx queue buf_ring.
*/
#define DEFAULT_TXBRSZ (4096 * 4096)
#define SMALL_TXBRSZ 4096
/* This may require mbuf cluster tuning */
#define DEFAULT_TXBRSZ (SMALL_TXBRSZ * SMALL_TXBRSZ)
/* Alignment for rings */
#define DBA_ALIGN 128
@ -194,7 +196,7 @@
#define MAX_MULTICAST_ADDR 128
#define IXL_BAR 3
#define IXL_BAR 3
#define IXL_ADM_LIMIT 2
#define IXL_TSO_SIZE 65535
#define IXL_TX_BUF_SZ ((u32) 1514)
@ -208,7 +210,7 @@
#define IXL_ITR_NONE 3
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 0x2600
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 66
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
@ -292,7 +294,6 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
/*
*****************************************************************************
* vendor_info_array
@ -476,6 +477,7 @@ struct ixl_vsi {
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
bool stat_offsets_loaded;
/* VSI stat counters */
u64 ipackets;
u64 ierrors;
u64 opackets;
@ -523,7 +525,8 @@ ixl_get_filter(struct ixl_vsi *vsi)
/* create a new empty filter */
f = malloc(sizeof(struct ixl_mac_filter),
M_DEVBUF, M_NOWAIT | M_ZERO);
SLIST_INSERT_HEAD(&vsi->ftl, f, next);
if (f)
SLIST_INSERT_HEAD(&vsi->ftl, f, next);
return (f);
}

View file

@ -238,6 +238,11 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
maxsegs = IXL_MAX_TSO_SEGS;
if (ixl_tso_detect_sparse(m_head)) {
m = m_defrag(m_head, M_NOWAIT);
if (m == NULL) {
m_freem(*m_headp);
*m_headp = NULL;
return (ENOBUFS);
}
*m_headp = m;
}
}
@ -791,6 +796,7 @@ ixl_txeof(struct ixl_queue *que)
mtx_assert(&txr->mtx, MA_OWNED);
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
que->busy = 0;
@ -1186,6 +1192,9 @@ skip_head:
rxr->bytes = 0;
rxr->discard = FALSE;
wr32(vsi->hw, rxr->tail, que->num_desc - 1);
ixl_flush(vsi->hw);
#if defined(INET6) || defined(INET)
/*
** Now set up the LRO interface:
@ -1365,6 +1374,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
IXL_RX_LOCK(rxr);
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
u32 rsc, status, error;
@ -1660,3 +1670,4 @@ ixl_get_counter(if_t ifp, ift_counter cnt)
}
}
#endif

View file

@ -36,11 +36,13 @@
#ifndef _IXLV_H_
#define _IXLV_H_
#define IXLV_AQ_MAX_ERR 100
#include "ixlv_vc_mgr.h"
#define IXLV_AQ_MAX_ERR 1000
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) // 20 msec
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
@ -51,8 +53,8 @@
#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
/* printf %b arg */
#define IXLV_FLAGS \
@ -61,6 +63,9 @@
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
/* Hack for compatibility with 1.0.x linux pf driver */
#define I40E_VIRTCHNL_OP_EVENT 17
/* Driver state */
enum ixlv_state_t {
IXLV_START,
@ -111,12 +116,10 @@ struct ixlv_sc {
struct ifmedia media;
struct callout timer;
struct callout aq_task;
int msix;
int if_flags;
struct mtx mtx;
struct mtx aq_task_mtx;
u32 qbase;
u32 admvec;
@ -127,10 +130,8 @@ struct ixlv_sc {
struct ixl_vsi vsi;
/* Mac Filter List */
/* Filter lists */
struct mac_list *mac_filters;
/* Vlan Filter List */
struct vlan_list *vlan_filters;
/* Promiscuous mode */
@ -138,11 +139,19 @@ struct ixlv_sc {
/* Admin queue task flags */
u32 aq_wait_count;
u32 aq_required;
u32 aq_pending;
struct ixl_vc_mgr vc_mgr;
struct ixl_vc_cmd add_mac_cmd;
struct ixl_vc_cmd del_mac_cmd;
struct ixl_vc_cmd config_queues_cmd;
struct ixl_vc_cmd map_vectors_cmd;
struct ixl_vc_cmd enable_queues_cmd;
struct ixl_vc_cmd add_vlan_cmd;
struct ixl_vc_cmd del_vlan_cmd;
struct ixl_vc_cmd add_multi_cmd;
struct ixl_vc_cmd del_multi_cmd;
/* Virtual comm channel */
enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res;
struct i40e_virtchnl_vsi_resource *vsi_res;
@ -150,16 +159,10 @@ struct ixlv_sc {
u64 watchdog_events;
u64 admin_irq;
/* Signaling channels */
u8 init_done;
u8 config_queues_done;
u8 map_vectors_done;
u8 enable_queues_done;
u8 disable_queues_done;
u8 add_ether_done;
u8 del_ether_done;
u8 aq_buffer[IXL_AQ_BUF_SZ];
};
#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED)
/*
** This checks for a zero mac addr, something that will be likely
** unless the Admin on the Host has created one.
@ -174,7 +177,7 @@ ixlv_check_ether_addr(u8 *addr)
status = FALSE;
return (status);
}
/*
** VF Common function prototypes
*/
@ -201,5 +204,6 @@ void ixlv_add_vlans(struct ixlv_sc *);
void ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
void ixlv_update_link_status(struct ixlv_sc *);
#endif /* _IXLV_H_ */

View file

@ -47,6 +47,13 @@
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
enum i40e_status_code);
static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
#ifdef IXL_DEBUG
/*
** Validate VF messages
*/
@ -140,6 +147,7 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
else
return 0;
}
#endif
/*
** ixlv_send_pf_msg
@ -153,16 +161,17 @@ ixlv_send_pf_msg(struct ixlv_sc *sc,
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
i40e_status err;
int val_err;
#ifdef IXL_DEBUG
/*
** Pre-validating messages to the PF, this might be
** removed for performance later?
** Pre-validating messages to the PF
*/
int val_err;
val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
if (val_err)
device_printf(dev, "Error validating msg to PF for op %d,"
" msglen %d: error %d\n", op, len, val_err);
#endif
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
if (err)
@ -198,7 +207,8 @@ ixlv_send_api_ver(struct ixlv_sc *sc)
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int ixlv_verify_api_ver(struct ixlv_sc *sc)
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
struct i40e_virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &sc->hw;
@ -232,6 +242,8 @@ int ixlv_verify_api_ver(struct ixlv_sc *sc)
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_VERSION) {
DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
le32toh(event.desc.cookie_high));
err = EIO;
goto out_alloc;
}
@ -289,15 +301,15 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
goto out;
}
do {
for (;;) {
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
i40e_msec_delay(100);
i40e_msec_delay(10);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
device_printf(dev, "%s: Received a response from PF,"
" opcode %d, error %d\n", __func__,
DDPRINTF(dev, "Received a response from PF,"
" opcode %d, error %d",
le32toh(event.desc.cookie_high),
le32toh(event.desc.cookie_low));
retries++;
@ -312,16 +324,17 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
err = EIO;
goto out_alloc;
}
/* We retrieved the config message, with no errors */
break;
}
if (retries > IXLV_AQ_MAX_ERR) {
INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
retries);
err = ETIMEDOUT;
goto out_alloc;
}
} while (err);
}
memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
i40e_vf_parse_hw_config(hw, sc->vf_res);
@ -345,28 +358,18 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct ixl_queue *que = vsi->queues;
struct tx_ring *txr;
struct rx_ring *rxr;
int len, pairs;;
int len, pairs;
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
#ifdef IXL_DEBUG
device_printf(dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
pairs = vsi->num_queues;
sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!vqci) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
vqci->vsi_id = sc->vsi_res->vsi_id;
@ -375,7 +378,7 @@ ixlv_configure_queues(struct ixlv_sc *sc)
/* Size check is not needed here - HW max is 16 queue pairs, and we
* can fit info for 31 of them into the AQ buffer before it overflows.
*/
for (int i = 0; i < pairs; i++, que++) {
for (int i = 0; i < pairs; i++, que++, vqpi++) {
txr = &que->txr;
rxr = &que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
@ -393,14 +396,12 @@ ixlv_configure_queues(struct ixlv_sc *sc)
vqpi->rxq.dma_ring_addr = rxr->dma.pa;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi++;
vqpi->rxq.splithdr_enabled = 0;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
free(vqci, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES;
}
/*
@ -413,22 +414,11 @@ ixlv_enable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* we already have a command pending */
#ifdef IXL_DEBUG
device_printf(sc->dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES;
sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES;
}
/*
@ -441,22 +431,11 @@ ixlv_disable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* we already have a command pending */
#ifdef IXL_DEBUG
device_printf(sc->dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES;
sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES;
}
/*
@ -473,16 +452,6 @@ ixlv_map_queues(struct ixlv_sc *sc)
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* we already have a command pending */
#ifdef IXL_DEBUG
device_printf(sc->dev, "%s: command %d pending\n",
__func__, sc->current_op);
#endif
return;
}
sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
/* How many queue vectors, adminq uses one */
q = sc->msix - 1;
@ -491,6 +460,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm = malloc(len, M_DEVBUF, M_NOWAIT);
if (!vm) {
printf("%s: unable to allocate memory\n", __func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@ -501,6 +471,8 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].vector_id = i + 1; /* first is adminq */
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 0;
}
/* Misc vector last - this is only for AdminQ messages */
@ -508,12 +480,12 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].vector_id = 0;
vm->vecmap[i].txq_map = 0;
vm->vecmap[i].rxq_map = 0;
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 0;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vm, len);
free(vm, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS;
sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS;
}
/*
@ -529,11 +501,6 @@ ixlv_add_vlans(struct ixlv_sc *sc)
device_t dev = sc->dev;
int len, i = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
/* Get count of VLAN filters to add */
SLIST_FOREACH(f, sc->vlan_filters, next) {
if (f->flags & IXL_FILTER_ADD)
@ -541,8 +508,8 @@ ixlv_add_vlans(struct ixlv_sc *sc)
}
if (!cnt) { /* no work... */
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
I40E_SUCCESS);
return;
}
@ -552,6 +519,7 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (len > IXL_AQ_BUF_SZ) {
device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
__func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@ -559,6 +527,7 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (!v) {
device_printf(dev, "%s: unable to allocate memory\n",
__func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@ -575,16 +544,17 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
return;
}
// ERJ: Should this be taken out?
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
I40E_SUCCESS);
return;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
}
/*
@ -600,11 +570,6 @@ ixlv_del_vlans(struct ixlv_sc *sc)
struct ixlv_vlan_filter *f, *ftmp;
int len, i = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
/* Get count of VLAN filters to delete */
SLIST_FOREACH(f, sc->vlan_filters, next) {
if (f->flags & IXL_FILTER_DEL)
@ -612,8 +577,8 @@ ixlv_del_vlans(struct ixlv_sc *sc)
}
if (!cnt) { /* no work... */
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
I40E_SUCCESS);
return;
}
@ -623,6 +588,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (len > IXL_AQ_BUF_SZ) {
device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
__func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@ -630,6 +596,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (!v) {
device_printf(dev, "%s: unable to allocate memory\n",
__func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
@ -647,16 +614,17 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
return;
}
// ERJ: Take this out?
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
I40E_SUCCESS);
return;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
}
@ -673,11 +641,6 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
device_t dev = sc->dev;
int len, j = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
/* Get count of MAC addresses to add */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_ADD)
@ -685,9 +648,8 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
}
if (cnt == 0) { /* Should not happen... */
DDPRINTF(dev, "cnt == 0, exiting...");
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
wakeup(&sc->add_ether_done);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
I40E_SUCCESS);
return;
}
@ -698,6 +660,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
if (a == NULL) {
device_printf(dev, "%s: Failed to get memory for "
"virtchnl_ether_addr_list\n", __func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
a->vsi_id = sc->vsi.id;
@ -722,8 +685,6 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
/* add stats? */
free(a, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
return;
}
@ -740,11 +701,6 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
struct ixlv_mac_filter *f, *f_temp;
int len, j = 0, cnt = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
/* Get count of MAC addresses to delete */
SLIST_FOREACH(f, sc->mac_filters, next) {
if (f->flags & IXL_FILTER_DEL)
@ -752,9 +708,8 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
}
if (cnt == 0) {
DDPRINTF(dev, "cnt == 0, exiting...");
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
wakeup(&sc->del_ether_done);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
I40E_SUCCESS);
return;
}
@ -765,6 +720,7 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
if (d == NULL) {
device_printf(dev, "%s: Failed to get memory for "
"virtchnl_ether_addr_list\n", __func__);
ixl_vc_schedule_retry(&sc->vc_mgr);
return;
}
d->vsi_id = sc->vsi.id;
@ -787,8 +743,6 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
/* add stats? */
free(d, M_DEVBUF);
sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
return;
}
@ -806,7 +760,6 @@ ixlv_request_reset(struct ixlv_sc *sc)
*/
wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
/*
@ -817,18 +770,11 @@ void
ixlv_request_stats(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
int error = 0;
if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
return;
sc->current_op = I40E_VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = sc->vsi_res->vsi_id;
error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
/* Low priority, we don't need to error check */
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs));
/* Low priority, ok if it fails */
if (error)
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
/*
@ -889,10 +835,16 @@ ixlv_vc_completion(struct ixlv_sc *sc,
switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
device_printf(dev, "Link change: status %d, speed %d\n",
vpe->event_data.link_event.link_status,
vpe->event_data.link_event.link_speed);
#endif
vsi->link_up =
vpe->event_data.link_event.link_status;
vsi->link_speed =
vpe->event_data.link_event.link_speed;
ixlv_update_link_status(sc);
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
@ -908,14 +860,6 @@ ixlv_vc_completion(struct ixlv_sc *sc,
return;
}
if (v_opcode != sc->current_op
&& sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) {
device_printf(dev, "%s: Pending op is %d, received %d.\n",
__func__, sc->current_op, v_opcode);
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
/* Catch-all error response */
if (v_retval) {
device_printf(dev,
@ -933,27 +877,35 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
v_retval);
if (v_retval) {
device_printf(dev, "WARNING: Error adding VF mac filter!\n");
device_printf(dev, "WARNING: Device may not receive traffic!\n");
}
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
v_retval);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
v_retval);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
v_retval);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
v_retval);
if (v_retval == 0) {
/* Update link status */
ixlv_update_link_status(sc);
/* Turn on all interrupts */
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
@ -962,7 +914,8 @@ ixlv_vc_completion(struct ixlv_sc *sc,
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
v_retval);
if (v_retval == 0) {
/* Turn off all interrupts */
ixlv_disable_intr(vsi);
@ -971,10 +924,12 @@ ixlv_vc_completion(struct ixlv_sc *sc,
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
v_retval);
break;
default:
device_printf(dev,
@ -982,6 +937,181 @@ ixlv_vc_completion(struct ixlv_sc *sc,
__func__, v_opcode);
break;
}
sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
return;
}
/*
** Dispatch a queued virtchnl request flag to the routine that
** builds and sends the matching message to the PF.  Unknown
** request values are silently ignored.
*/
static void
ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
{

	if (request == IXLV_FLAG_AQ_MAP_VECTORS)
		ixlv_map_queues(sc);
	else if (request == IXLV_FLAG_AQ_ADD_MAC_FILTER)
		ixlv_add_ether_filters(sc);
	else if (request == IXLV_FLAG_AQ_ADD_VLAN_FILTER)
		ixlv_add_vlans(sc);
	else if (request == IXLV_FLAG_AQ_DEL_MAC_FILTER)
		ixlv_del_ether_filters(sc);
	else if (request == IXLV_FLAG_AQ_DEL_VLAN_FILTER)
		ixlv_del_vlans(sc);
	else if (request == IXLV_FLAG_AQ_CONFIGURE_QUEUES)
		ixlv_configure_queues(sc);
	else if (request == IXLV_FLAG_AQ_DISABLE_QUEUES)
		ixlv_disable_queues(sc);
	else if (request == IXLV_FLAG_AQ_ENABLE_QUEUES)
		ixlv_enable_queues(sc);
}
/*
** Initialize a virtchnl command manager: empty pending queue,
** no command in flight, and a timeout callout protected by the
** softc's core mutex.
*/
void
ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
{

	TAILQ_INIT(&mgr->pending);
	mgr->current = NULL;
	mgr->sc = sc;
	/* Timeout/retry callout runs with the core lock held */
	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
}
/*
** Retire the in-flight command: clear it from the manager,
** report 'err' to its owner via the completion callback, and
** start the next queued command (if any).
*/
static void
ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
{
	struct ixl_vc_cmd *done = mgr->current;

	/* Detach before the callback so the owner may re-enqueue it */
	mgr->current = NULL;
	done->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
	done->callback(done, done->arg, err);

	ixl_vc_process_next(mgr);
}
/*
** Handle a PF response for 'request': if it matches the command
** currently in flight, cancel the timeout and complete it with
** status 'err'.  Responses for anything else are ignored.
*/
static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
    enum i40e_status_code err)
{
	struct ixl_vc_cmd *active = mgr->current;

	if (active != NULL && active->request == request) {
		/* Matched; disarm the pending timeout first */
		callout_stop(&mgr->callout);
		ixl_vc_process_completion(mgr, err);
	}
}
/*
** Callout handler: the in-flight command got no response in
** time, so complete it with I40E_ERR_TIMEOUT.
*/
static void
ixl_vc_cmd_timeout(void *arg)
{
	/* void * converts implicitly; no cast needed */
	struct ixl_vc_mgr *mgr = arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}
/*
** Callout handler: re-send the current command after a
** transient failure (see ixl_vc_schedule_retry()).
*/
static void
ixl_vc_cmd_retry(void *arg)
{
	/* void * converts implicitly; no cast needed */
	struct ixl_vc_mgr *mgr = arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_send_current(mgr);
}
/*
** Transmit the current command's request to the PF and arm the
** response timeout.
*/
static void
ixl_vc_send_current(struct ixl_vc_mgr *mgr)
{

	ixl_vc_send_cmd(mgr->sc, mgr->current->request);
	/* Expect a response before IXLV_VC_TIMEOUT ticks elapse */
	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
}
/*
** If no command is in flight, pop the head of the pending queue
** (FIFO) and send it.  No-op when busy or when the queue is
** empty.
*/
static void
ixl_vc_process_next(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *head;

	/* Only one command may be outstanding at a time */
	if (mgr->current != NULL)
		return;

	head = TAILQ_FIRST(&mgr->pending);
	if (head == NULL)
		return;

	TAILQ_REMOVE(&mgr->pending, head, next);
	mgr->current = head;
	ixl_vc_send_current(mgr);
}
/*
** Arrange to re-send the current command shortly (hz/100 ticks,
** rounded up — roughly 10ms) after a transient failure such as
** a temporary allocation shortage.
*/
static void
ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
{
	int delay;

	delay = howmany(hz, 100);
	callout_reset(&mgr->callout, delay, ixl_vc_cmd_retry, mgr);
}
/*
** Queue (or re-queue) a virtchnl command for delivery to the PF.
**
** The command's request flag, completion callback, and callback
** argument are (re)initialized here; completion, timeout, or
** flush is later reported through 'callback'.  Must be called
** with the core lock held.
*/
void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	IXLV_CORE_LOCK_ASSERT(mgr->sc);

	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		/*
		 * Already in flight or queued: detach it so it can be
		 * re-armed with the new request below.
		 * NOTE(review): when 'cmd' is the in-flight command the
		 * timeout callout is not stopped here — it appears to be
		 * re-armed by ixl_vc_process_next() via
		 * ixl_vc_send_current(); confirm the old timeout cannot
		 * fire against the wrong command.
		 */
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}

	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	/* Send immediately if nothing else is in flight. */
	ixl_vc_process_next(mgr);
}
/*
** Abort every queued and in-flight command with
** I40E_ERR_ADAPTER_STOPPED (e.g. on detach/reset): the current
** command is completed first, then the pending queue is drained
** in FIFO order, and finally the timeout callout is stopped.
** Must be called with the core lock held.
*/
void
ixl_vc_flush(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
	    ("ixlv: pending commands waiting but no command in progress"));

	/* Abort the in-flight command first... */
	cmd = mgr->current;
	if (cmd != NULL) {
		mgr->current = NULL;
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	/* ...then drain the pending queue in arrival order. */
	for (;;) {
		cmd = TAILQ_FIRST(&mgr->pending);
		if (cmd == NULL)
			break;
		TAILQ_REMOVE(&mgr->pending, cmd, next);
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	callout_stop(&mgr->callout);
}

View file

@ -8,7 +8,7 @@ SRCS += opt_inet.h opt_inet6.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
CFLAGS += -DSMP