Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to e1000, e1000e, igb, igbvf and ixgbe.
The e1000, e1000e, igb and igbvf updates are single-patch changes; the
remaining 11 patches are all against ixgbe.

The e1000 patch is a comment cleanup to align e1000 with the code
commenting style for drivers/net.  It also contains a few other whitespace
cleanups (i.e. fixing lines over 80 characters, removing unnecessary blank
lines, and fixing the use of tabs/spaces).
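
For reference, the target style (taken from the first e1000.h hunk below)
keeps the comment text flowing from the opening /* and puts the closing
*/ on its own line:

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */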

The e1000e patch from Koki (Fujitsu) adds a warning when link speed is
downgraded due to SmartSpeed.
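
The check itself is small; as the e1000e diff below shows, the watchdog
task now runs the downshift check once link comes up:

/* check if SmartSpeed worked */
e1000e_check_downshift(hw);
if (phy->speed_downgraded)
	netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");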

The igb patch from Stefan (Red Hat) increases the timeout in the ethtool
offline self-test, since some i350 adapters would occasionally fail the
self-test when link auto-negotiation took longer than the previous
4 second timeout.

The igbvf patch from Alex addresses several races that were possible
because next_to_watch could be set to a value indicating that a descriptor
was done when it was not.  To correct this, next_to_watch is made a
pointer that is set to NULL during cleanup and set to the eop_desc only
after the descriptor rings have been written.

The remaining patches for ixgbe are a mix of fixes and added support, as
well as some cleanup.  Most notable is the added support from Alex for
displaying and configuring the number of Tx/Rx channels via ethtool.
Also, Aurélien adds the ability to read data from SFP+ modules over i2c
for diagnostic monitoring.
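
Both features surface through the standard ethtool interface (the
interface name here is only an example):

ethtool -l eth0              # show channel counts (ixgbe_get_channels)
ethtool -L eth0 combined 8   # change channel counts (ixgbe_set_channels)
ethtool -m eth0              # dump SFP+ module data read over i2c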
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2f219d5fb1 (David S. Miller, 2013-02-18 12:35:42 -05:00)
14 changed files with 975 additions and 662 deletions

drivers/net/ethernet/intel/e1000/e1000.h

@ -142,7 +142,8 @@ struct e1000_adapter;
#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;

drivers/net/ethernet/intel/e1000/e1000_ethtool.c

@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
+ * and HALF_DUPLEX != DUPLEX_HALF
+ */
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
- ETH_TP_MDI_X :
- ETH_TP_MDI);
+ ETH_TP_MDI_X : ETH_TP_MDI);
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+ GFP_KERNEL);
if (!txdr)
goto err_alloc_tx;
- rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+ GFP_KERNEL);
if (!rxdr)
goto err_alloc_rx;
@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
+ * then restore the new back again
+ */
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) {
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
} else {
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
}
value = E1000_MC_TBL_SIZE;
@ -858,8 +856,9 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
- /* NOTE: we don't test MSI interrupts here, yet */
- /* Hook up test interrupt handler just for this test */
+ /* NOTE: we don't test MSI interrupts here, yet
+ * Hook up test interrupt handler just for this test
+ */
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
netdev))
shared_int = false;
@ -1260,7 +1259,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic is half
- * duplex link is detected. */
+ * duplex link is detected.
+ */
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
e1000_check_for_link(hw);
if (hw->serdes_has_link)
@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
+ * so exclude FUNC_1 ports from having WoL enabled
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
+ * supported by this hardware
+ */
if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return;
@ -1859,7 +1863,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}

File diff suppressed because it is too large.

drivers/net/ethernet/intel/e1000/e1000_main.c

@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
static int __init e1000_init_module(void)
{
int ret;
@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed
* from memory.
**/
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@ -398,7 +394,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset ***
- *
**/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
- * settings across a power-down/up cycle */
+ * settings across a power-down/up cycle
+ */
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
- * (c) SoL/IDER session is active */
+ * (c) SoL/IDER session is active
+ */
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- /*
- * Setting DOWN must be after irq_disable to prevent
+ /* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling.
*/
@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the tx fifo also stores 16 bytes of information about the tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (hw->max_frame_size +
@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break;
}
- /* if short on rx space, rx wins and must trump tx
- * adjustment or use Early Receive if available */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
- /*
- * flow control settings:
+ /* flow control settings:
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
- * can cause a loss of link at power off or driver unload */
+ * can cause a loss of link at power off or driver unload
+ */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /*
- * there is a workaround being applied below that limits
+ /* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
+ /* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* before reading the EEPROM, reset the controller to
- * put the device in a known good starting state */
+ * put the device in a known good starting state
+ */
e1000_reset_hw(hw);
@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
- /*
- * set MAC address to all zeroes to invalidate and temporary
+ /* set MAC address to all zeroes to invalidate and temporary
* disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the
@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port */
+ * wake on lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
@ -1270,7 +1267,6 @@ err_pci_reg:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function
**/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
e1000_configure(adapter);
err = e1000_request_irq(adapter);
@ -1444,7 +1438,6 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -1459,7 +1452,8 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it) */
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
- * write location to cross 64k boundary due to errata 23 */
+ * write location to cross 64k boundary due to errata 23
+ */
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
*
* Return 0 on success, negative on failure
**/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr)
{
@ -1574,7 +1568,6 @@ setup_tx_desc_die:
*
* Return 0 on success, negative on failure
**/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
*
* Configure the Tx unit of the MAC after a reset.
**/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0);
ew32(TDH, 0);
- adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
- adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
break;
}
@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
- * need this to apply a workaround later in the send path. */
+ * need this to apply a workaround later in the send path.
+ */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
@ -1771,7 +1765,6 @@ setup_rx_desc_die:
*
* Return 0 on success, negative on failure
**/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
*
* Configure the Rx unit of the MAC after a reset.
**/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
switch (adapter->num_rx_queues) {
case 1:
default:
@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
- adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
- adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDT : E1000_82542_RDT);
break;
}
@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*
* Free all transmit software resources
**/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
*
* Free all transmit software resources
**/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
**/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
*
* Free all receive software resources
**/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
*
* Free all receive software resources
**/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure
**/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
}
/* write the hash table completely, write from bottom to avoid
- * both stupid write combining chipsets, and flushing each write */
+ * both stupid write combining chipsets, and flushing each write
+ */
for (i = mta_reg_count - 1; i >= 0 ; i--) {
- /*
- * If we are on an 82544 has an errata where writing odd
+ /* If we are on an 82544 has an errata where writing odd
* offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always
* writing the odd offset first
@ -2533,7 +2519,8 @@ link_up:
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
@ -2543,8 +2530,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
@ -2659,16 +2645,14 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
+ adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
adapter->total_tx_packets,
adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
+ adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
adapter->total_rx_packets,
adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
@ -2696,7 +2680,8 @@ set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > adapter->itr ?
min(adapter->itr + (new_itr >> 2), new_itr) :
new_itr;
@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
+ * DMA'd to the controller
+ */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
tx_ring->last_tx_tso = false;
@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
+ * in TSO mode. Append 4-byte sentinel desc
+ */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
/* work-around for errata 10 and it applies
@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
+ * terminating buffers within evenly-aligned dwords.
+ */
if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
size -= 4;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
- * dwords. */
+ * dwords.
+ */
bufend = (unsigned long)
page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1;
@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso;
unsigned int f;
- /* This goes back to the question of how to logically map a tx queue
+ /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
+ * if using multiple Tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again.
+ */
tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) {
@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
- * drops. */
+ * drops.
+ */
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements
* NOTE: this is a TSO only workaround
* if end byte alignment not correct move us
- * into the next dword */
- if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ * into the next dword
+ */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1)
+ & 4)
break;
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time */
+ * head, otherwise try next time
+ */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */
e1000_regdump(adapter);
- /*
- * transmit dump
- */
+ /* transmit dump */
pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
}
rx_ring_summary:
- /*
- * receive dump
- */
+ /* receive dump */
pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format
@ -3493,7 +3489,6 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog.
**/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
/* only return the current stats */
@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
*
* Returns 0 on success, negative on failure
**/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -3573,7 +3566,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
* however with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs */
+ * fragmented skbs
+ */
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
void e1000_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
- /*
- * we might have caused the interrupt, but the above
+ /* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is
* down there is nothing to do so return handled
*/
@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi);
} else {
/* this really should not happen! if it does it is basically a
- * bug, but not a hard error, so enable ints and continue */
+ * bug, but not a hard error, so enable ints and continue
+ */
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@ -3907,7 +3900,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
{
@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
- * too */
+ * too
+ */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
@ -4132,22 +4126,26 @@ process_skb:
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr, length);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
@ -4205,8 +4203,7 @@ next_desc:
return cleaned;
}
- /*
- * this should improve performance for small packets with large amounts
+ /* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
@ -4377,7 +4374,6 @@ next_desc:
* @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring, int cleaned_count)
@ -4451,7 +4447,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
@ -4461,7 +4458,6 @@ check_page:
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count)
@ -4532,8 +4528,7 @@ map_skb:
break; /* while !buffer_info->skb */
}
- /*
- * XXX if it was allocated cleanly it will never map to a
+ /* XXX if it was allocated cleanly it will never map to a
* boundary crossing
*/
@ -4571,7 +4566,8 @@ map_skb:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
@ -4581,7 +4577,6 @@ map_skb:
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter:
**/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
+ * we assume back-to-back
+ */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq:
* @cmd:
**/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq:
* @cmd:
**/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
- /*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+ /* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/

drivers/net/ethernet/intel/e1000/e1000_param.c

@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
- .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD
}}
};
@ -488,7 +488,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
- * used as control */
+ * used as control
+ */
adapter->itr_setting = adapter->itr & ~3;
break;
}
@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on fiber adapters
**/
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on copper adapters
**/
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;

drivers/net/ethernet/intel/e1000e/netdev.c

@ -4830,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
/* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/

drivers/net/ethernet/intel/igb/igb_ethtool.c

@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg)
- msleep(4000);
+ msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;

drivers/net/ethernet/intel/igbvf/igbvf.h

@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */
struct {
unsigned long time_stamp;
+ union e1000_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */

drivers/net/ethernet/intel/igbvf/netdev.c

@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
bool cleaned = false;
i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ buffer_info = &tx_ring->buffer_info[i];
+ eop_desc = buffer_info->next_to_watch;
+ do {
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+ /* clear next_to_watch to prevent false hangs */
+ buffer_info->next_to_watch = NULL;
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = buffer_info->skb;
if (skb) {
@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++;
if (i == tx_ring->count)
i = 0;
+ buffer_info = &tx_ring->buffer_info[i];
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
+ eop_desc = buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
+ struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE);
@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@ -2120,7 +2127,6 @@ dma_error:
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
if (count)
count--;
@ -2139,7 +2145,8 @@ dma_error:
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
int tx_flags, int count, u32 paylen,
int tx_flags, int count,
unsigned int first, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */
wmb();
+ tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
+ first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {

drivers/net/ethernet/intel/ixgbe/ixgbe.h

@ -156,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
- #define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+ #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@ -201,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
@ -278,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
- #define IXGBE_MAX_FDIR_INDICES 64
- #ifdef IXGBE_FCOE
+ #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
- #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
- #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
- #else
- #define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
- #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
- #endif /* IXGBE_FCOE */
+ #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+ #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@ -624,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
+ __IXGBE_READ_I2C,
};
struct ixgbe_cb {
@ -704,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
- extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+ extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON

drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include "ixgbe.h"
+ #include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16
@ -2112,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- u16 tx_itr_param, rx_itr_param;
+ u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
return -EINVAL;
+ tx_itr_prev = adapter->rx_itr_setting;
+ } else {
+ tx_itr_prev = adapter->tx_itr_setting;
+ }
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@ -2144,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* mixed Rx/Tx */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ #if IS_ENABLED(CONFIG_BQL)
+ /* detect ITR changes that require update of TXDCTL.WTHRESH */
+ if ((adapter->tx_itr_setting > 1) &&
+ (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ if ((tx_itr_prev == 1) ||
+ (tx_itr_prev > IXGBE_100K_ITR))
+ need_reset = true;
+ } else {
+ if ((tx_itr_prev > 1) &&
+ (tx_itr_prev < IXGBE_100K_ITR))
+ need_reset = true;
+ }
+ #endif
/* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
+ need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
@ -2731,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0;
}
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
unsigned int max_combined;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* We only support one q_vector without MSI-X */
max_combined = 1;
} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
/* SR-IOV currently only allows one queue on the PF */
max_combined = 1;
} else if (tcs > 1) {
/* For DCB report channels per traffic class */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* 8 TC w/ 4 queues per TC */
max_combined = 4;
} else if (tcs > 4) {
/* 8 TC w/ 8 queues per TC */
max_combined = 8;
} else {
/* 4 TC w/ 16 queues per TC */
max_combined = 16;
}
} else if (adapter->atr_sample_rate) {
/* support up to 64 queues with ATR */
max_combined = IXGBE_MAX_FDIR_INDICES;
} else {
/* support up to 16 queues with RSS */
max_combined = IXGBE_MAX_RSS_INDICES;
}
return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
/* report info for other vector */
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
ch->max_other = NON_Q_VECTORS;
ch->other_count = NON_Q_VECTORS;
}
/* record RSS queues */
ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
/* nothing else to report if RSS is disabled */
if (ch->combined_count == 1)
return;
/* we do not support ATR queueing if SR-IOV is enabled */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return;
/* same thing goes for being DCB enabled */
if (netdev_get_num_tc(dev) > 1)
return;
/* if ATR is disabled we can exit */
if (!adapter->atr_sample_rate)
return;
/* report flow director queues as maximum channels */
ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
unsigned int count = ch->combined_count;
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* verify other_count has not changed */
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
if (count > ixgbe_max_channels(adapter))
return -EINVAL;
/* update feature limits from largest to smallest supported values */
adapter->ring_feature[RING_F_FDIR].limit = count;
/* cap RSS limit at 16 */
if (count > IXGBE_MAX_RSS_INDICES)
count = IXGBE_MAX_RSS_INDICES;
adapter->ring_feature[RING_F_RSS].limit = count;
#ifdef IXGBE_FCOE
/* cap FCoE limit at 8 */
if (count > IXGBE_FCRETA_SIZE)
count = IXGBE_FCRETA_SIZE;
adapter->ring_feature[RING_F_FCOE].limit = count;
#endif
/* use setup TC to update any traffic class queue mapping */
return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 status;
u8 sff8472_rev, addr_mode;
int ret_val = 0;
bool page_swap = false;
/* avoid concurent i2c reads */
while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
msleep(100);
/* used by the service task */
set_bit(__IXGBE_READ_I2C, &adapter->state);
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
if (status != 0) {
ret_val = -EIO;
goto err_out;
}
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
if (status != 0) {
ret_val = -EIO;
goto err_out;
}
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
page_swap = true;
}
if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
/* We have a SFP, but it does not support SFF-8472 */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
/* We have a SFP which supports a revision of SFF-8472. */
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
err_out:
clear_bit(__IXGBE_READ_I2C, &adapter->state);
return ret_val;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
int ret_val = 0;
/* ixgbe_get_module_info is called before this function in all
* cases, so we do not need any checks we already do above,
* and can trust ee->len to be a known value.
*/
while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
msleep(100);
set_bit(__IXGBE_READ_I2C, &adapter->state);
/* Read the first block, SFF-8079 */
for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
if (status != 0) {
/* Error occured while reading module */
ret_val = -EIO;
goto err_out;
}
data[i] = databyte;
}
/* If the second block is requested, check if SFF-8472 is supported. */
if (ee->len == ETH_MODULE_SFF_8472_LEN) {
if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
return -EOPNOTSUPP;
/* Read the second block, SFF-8472 */
for (i = ETH_MODULE_SFF_8079_LEN;
i < ETH_MODULE_SFF_8472_LEN; i++) {
status = hw->phy.ops.read_i2c_sff8472(hw,
i - ETH_MODULE_SFF_8079_LEN, &databyte);
if (status != 0) {
/* Error occured while reading module */
ret_val = -EIO;
goto err_out;
}
data[i] = databyte;
}
}
err_out:
clear_bit(__IXGBE_READ_I2C, &adapter->state);
return ret_val;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@ -2759,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)

View file

@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) {
@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) {
/* alloc queues for FCoE separately */
fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i;
@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
rss_i = f->indices = f->limit;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@ -776,21 +771,25 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
if (rss_i > 1 && adapter->atr_sample_rate) {
if (cpu_online(v_idx)) {
cpu = v_idx;
node = cpu_to_node(cpu);
}
}
}
/* allocate q_vector and rings */
q_vector = kzalloc_node(size, GFP_KERNEL, node);

View file

@ -2786,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/*
* set WTHRESH to encourage burst writeback, it should not be set
* higher than 1 when:
* - ITR is 0 as it could cause false TX hangs
* - ITR is set to > 100k int/sec and BQL is enabled
*
* In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
#if IS_ENABLED(CONFIG_BQL)
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
#else
if (!ring->q_vector || (ring->q_vector->itr < 8))
#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
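
As a worked illustration of the comment above (the field offsets follow the 82599 TXDCTL layout; the threshold values are illustrative):

/* illustration only: pack the TXDCTL thresholds the comment describes */
static u32 example_txdctl_thresholds(void)
{
	u32 txdctl = 0;

	txdctl |= 32;		/* PTHRESH, bits 6:0: prefetch threshold */
	txdctl |= 1 << 8;	/* HTHRESH, bits 14:8: host threshold */
	txdctl |= 8 << 16;	/* WTHRESH, bits 22:16: writeback bursts of 8 */

	/* WTHRESH + PTHRESH = 8 + 32 = 40, the on-chip descriptor count */
	return txdctl;
}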
@ -2813,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0;
}
/* initialize XPS */
if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
struct ixgbe_q_vector *q_vector = ring->q_vector;
if (q_vector)
netif_set_xps_queue(adapter->netdev,
&q_vector->affinity_mask,
ring->queue_index);
}
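
The mask installed here is visible from userspace through sysfs; a small sketch (interface name hypothetical, path format per Documentation/networking/scaling.txt):

#include <stdio.h>

/* sketch: print the XPS cpu mask installed for one tx queue */
static void show_xps_mask(const char *ifname, int txq)
{
	char path[128], mask[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/net/%s/queues/tx-%d/xps_cpus", ifname, txq);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(mask, sizeof(mask), f))
		printf("%s tx-%d -> cpus %s", ifname, txq, mask);
	fclose(f);
}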
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
@ -4465,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
unsigned int rss;
unsigned int rss, fdir;
u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
@ -4485,9 +4501,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20;
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
@ -5698,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
/* concurrent i2c reads are not supported */
if (test_bit(__IXGBE_READ_I2C, &adapter->state))
return;
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@ -6363,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
int txq;
/*
* only execute the code below if protocol is FCoE
* or FIP and we have FCoE enabled on the adapter
*/
switch (vlan_get_protocol(skb)) {
case __constant_htons(ETH_P_FCOE):
case __constant_htons(ETH_P_FIP):
adapter = netdev_priv(dev);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
return __netdev_pick_tx(dev, skb);
}
f = &adapter->ring_feature[RING_F_FCOE];
txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
smp_processor_id();
while (txq >= f->indices)
txq -= f->indices;
return txq + f->offset;
}
#endif
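
The while loop above is just a divide-free modulo. A standalone sketch of the resulting mapping (function name hypothetical):

/* reduce an arbitrary queue id into [offset, offset + indices),
 * i.e. txq % indices + offset without a divide
 */
static u16 fcoe_queue_map(u16 txq, u16 indices, u16 offset)
{
	while (txq >= indices)
		txq -= indices;
	return txq + offset;
}

/* e.g. indices = 4, offset = 16: a recorded rx queue of 9 becomes
 * 9 - 4 - 4 = 1, so the frame goes out on tx queue 1 + 16 = 17
 */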
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@ -6799,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
}
#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@ -6824,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_IXGBE_DCB
if (tc) {
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@ -6846,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false;
}
ixgbe_validate_rtr(adapter, tc);
#endif /* CONFIG_IXGBE_DCB */
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(dev))
return ixgbe_open(dev);
return 0;
}
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
rtnl_lock();
ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
rtnl_unlock();
}
@ -7118,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@ -7230,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@ -7281,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
/* 8 TC w/ 4 queues per TC */
indices = 4 * MAX_TRAFFIC_CLASS;
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
}
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@ -7454,13 +7463,17 @@ skip_sriov:
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
unsigned int fcoe_l;
if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC;

View file

@ -854,9 +854,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_IDENTIFIER,
&identifier);
if (status != 0)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
if (status != 0)
goto err_read_i2c_eeprom;
/* ID Module
@ -987,27 +979,21 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
if (status != 0)
goto err_read_i2c_eeprom;
vendor_oui =
@ -1307,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");