bnx2x: Add 57712 support

The 57712 HW is supported with the same set of features as the 57710/57711.

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Dmitry Kravkov <dmitry@broadcom.com>
Date:      2010-10-06 03:28:26 +0000
Committer: David S. Miller
Parent:    8fe23fbd94
Commit:    f2e0899f0f
14 changed files with 3431 additions and 793 deletions


@ -180,10 +180,16 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
offsetof(struct mf_cfg, field))
#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
offsetof(struct mf2_cfg, field))
#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
MF_CFG_ADDR(bp, field), (val))
#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
(SHMEM2_RD((bp), size) > \
offsetof(struct shmem2_region, field)))
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
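The SHMEM2_HAS() macro added above guards access to newer shmem2 fields by checking both that shmem2 is mapped and that its size field covers the requested member. A minimal usage sketch (the wrapper function name is hypothetical; dcc_support is a real shmem2 field used later in this diff):

/* Illustrative only: read an optional shmem2 field, falling back
 * to 0 when the firmware's shmem2 image is too old to contain it.
 */
static u32 bnx2x_example_read_dcc_support(struct bnx2x *bp)
{
	if (SHMEM2_HAS(bp, dcc_support))
		return SHMEM2_RD(bp, dcc_support);

	return 0;
}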
@ -296,6 +302,8 @@ union db_prod {
union host_hc_status_block {
/* pointer to fp status block e1x */
struct host_hc_status_block_e1x *e1x_sb;
/* pointer to fp status block e2 */
struct host_hc_status_block_e2 *e2_sb;
};
struct bnx2x_fastpath {
@ -564,12 +572,19 @@ struct bnx2x_common {
#define CHIP_NUM_57710 0x164e
#define CHIP_NUM_57711 0x164f
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
#define CHIP_NUM_57712E 0x1663
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
#define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define IS_E1H_OFFSET CHIP_IS_E1H(bp)
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
CHIP_IS_57712E(bp))
#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
#define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
#define CHIP_REV_Ax 0x00000000
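The new chip numbers and family predicates above (CHIP_IS_E2(), CHIP_IS_E1x()) are what the rest of the patch keys off wherever 57712 programming differs from 57710/57711. A hedged sketch of the branching pattern, with the body comments standing in for real per-family code:

/* Illustrative only:
 *   57710          -> CHIP_IS_E1(bp),  CHIP_IS_E1x(bp)
 *   57711/57711E   -> CHIP_IS_E1H(bp), CHIP_IS_E1x(bp)
 *   57712/57712E   -> CHIP_IS_E2(bp)
 */
static void bnx2x_example_family_branch(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp)) {
		/* 57712-specific programming would go here */
	} else {
		/* 57710/57711 (E1x) path */
	}
}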
@ -596,6 +611,7 @@ struct bnx2x_common {
u32 shmem_base;
u32 shmem2_base;
u32 mf_cfg_base;
u32 mf2_cfg_base;
u32 hw_config;
@ -603,10 +619,25 @@ struct bnx2x_common {
u8 int_block;
#define INT_BLOCK_HC 0
#define INT_BLOCK_IGU 1
#define INT_BLOCK_MODE_NORMAL 0
#define INT_BLOCK_MODE_BW_COMP 2
#define CHIP_INT_MODE_IS_NBC(bp) \
(CHIP_IS_E2(bp) && \
!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
u8 chip_port_mode;
#define CHIP_4_PORT_MODE 0x0
#define CHIP_2_PORT_MODE 0x1
#define CHIP_PORT_MODE_NONE 0x2
#define CHIP_MODE(bp) (bp->common.chip_port_mode)
#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
};
/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
#define BNX2X_IGU_STAS_MSG_VF_CNT 64
#define BNX2X_IGU_STAS_MSG_PF_CNT 4
/* end of common */
@ -670,7 +701,7 @@ enum {
*/
#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
#define MAX_CONTEXT FP_SB_MAX_E1x
#define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */
/*
* cid_cnt parameter below refers to the value returned by
@ -754,7 +785,7 @@ struct bnx2x_slowpath {
#define MAX_DYNAMIC_ATTN_GRPS 8
struct attn_route {
u32 sig[4];
u32 sig[5];
};
struct iro {
@ -896,13 +927,20 @@ struct bnx2x {
#define HW_VLAN_RX_FLAG 0x800
#define MF_FUNC_DIS 0x1000
int func;
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
int base_fw_ndsb;
#define BP_PORT(bp) (bp->func % PORT_MAX)
#define BP_FUNC(bp) (bp->func)
#define BP_E1HVN(bp) (bp->func >> 1)
#define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \
0 : (bp->pf_num & 1))
#define BP_PORT(bp) (bp->pfid & 1)
#define BP_FUNC(bp) (bp->pfid)
#define BP_ABS_FUNC(bp) (bp->pf_num)
#define BP_E1HVN(bp) (bp->pfid >> 1)
#define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \
0 : BP_E1HVN(bp))
#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
#ifdef BCM_CNIC
#define BCM_CNIC_CID_START 16
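The function numbering is split here into an absolute PF number (pf_num) and a per-path one (pfid), with the firmware mailbox index derived from port and VN. A worked example of the macro arithmetic, with the pfid/pf_num values chosen purely for illustration:

/*
 * Illustration only (values are arbitrary):
 * E2 device in 2-port mode, pf_num = 6, pfid = 3:
 *   BP_PATH(bp)      = 6 & 1      = 0
 *   BP_PORT(bp)      = 3 & 1      = 1
 *   BP_E1HVN(bp)     = 3 >> 1     = 1
 *   BP_VN(bp)        = 1          (not 4-port mode)
 *   BP_FW_MB_IDX(bp) = 1 + 1 * 1  = 2   (an E1x chip with the same
 *                                         port/vn would give 1 + 1 * 2 = 3)
 */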
@ -932,7 +970,8 @@ struct bnx2x {
struct cmng_struct_per_port cmng;
u32 vn_weight_sum;
u32 mf_config;
u32 mf_config[E1HVN_MAX];
u32 mf2_config[E2_FUNC_MAX];
u16 mf_ov;
u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0)
@ -1127,11 +1166,11 @@ struct bnx2x {
#define RSS_IPV6_CAP 0x0004
#define RSS_IPV6_TCP_CAP 0x0008
#define BNX2X_MAX_QUEUES(bp) (IS_MF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
: MAX_CONTEXT)
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
#define RSS_IPV4_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@ -1342,14 +1381,40 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* DMAE command defines */
#define DMAE_CMD_SRC_PCI 0
#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
#define DMAE_TIMEOUT -1
#define DMAE_PCI_ERROR -2 /* E2 and onward */
#define DMAE_NOT_RDY -3
#define DMAE_PCI_ERR_FLAG 0x80000000
#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
#define DMAE_SRC_PCI 0
#define DMAE_SRC_GRC 1
#define DMAE_CMD_C_DST_PCI 0
#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
#define DMAE_DST_NONE 0
#define DMAE_DST_PCI 1
#define DMAE_DST_GRC 2
#define DMAE_COMP_PCI 0
#define DMAE_COMP_GRC 1
/* E2 and onward - PCI error handling in the completion */
#define DMAE_COMP_REGULAR 0
#define DMAE_COM_SET_ERR 1
#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
DMAE_COMMAND_SRC_SHIFT)
#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
DMAE_COMMAND_SRC_SHIFT)
#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
DMAE_COMMAND_C_DST_SHIFT)
#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
DMAE_COMMAND_C_DST_SHIFT)
#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
@ -1365,10 +1430,20 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
#define DMAE_SRC_PF 0
#define DMAE_SRC_VF 1
#define DMAE_DST_PF 0
#define DMAE_DST_VF 1
#define DMAE_C_SRC 0
#define DMAE_C_DST 1
#define DMAE_LEN32_RD_MAX 0x80
#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0xe0d0d0ae
#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
indicates error */
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
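Under the new scheme the DMAE completion word can carry an error indication in its top bit on E2, which is why DMAE_COMP_VAL drops from 0xe0d0d0ae to 0x60d0d0ae and DMAE_PCI_ERR_FLAG appears. A hedged sketch of how a completion poll could distinguish the cases (the driver's real wait loop is not shown in these hunks):

/* Illustrative only: interpret a DMAE completion word. */
static int bnx2x_example_dmae_comp_status(u32 comp)
{
	if (comp == DMAE_COMP_VAL)
		return 0;			/* completed successfully */

	if (comp & DMAE_PCI_ERR_FLAG)
		return DMAE_PCI_ERROR;		/* E2 and on: PCI error flagged */

	return DMAE_NOT_RDY;			/* not completed yet */
}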
@ -1534,6 +1609,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define GET_FLAG(value, mask) \
(((value) &= (mask)) >> (mask##_SHIFT))
#define GET_FIELD(value, fname) \
(((value) & (fname##_MASK)) >> (fname##_SHIFT))
#define CAM_IS_INVALID(x) \
(GET_FLAG(x.flags, \
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
@ -1553,6 +1631,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
#ifndef ETH_MAX_RX_CLIENTS_E2
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
@ -1570,13 +1651,18 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_EXTERN extern
#endif
BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
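The bnx2x_dmae_opcode() helpers declared just above replace the open-coded opcode assembly that is removed from the statistics code later in this diff. A short usage sketch mirroring those call sites (the wrapper function itself is illustrative only):

/* Illustrative only: build a GRC -> PCI read opcode and attach a
 * completion destination per command, as the converted stats code does.
 */
static void bnx2x_example_build_dmae_opcodes(struct bnx2x *bp,
					     struct dmae_command *dmae)
{
	u32 opcode;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   false, 0);

	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
}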


@ -18,7 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"
@ -118,16 +118,10 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
pkt_cons = TX_BD(sw_cons);
/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
" pkt_cons %u\n",
fp->index, hw_cons, sw_cons, pkt_cons);
DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
hw_cons, sw_cons, pkt_cons);
/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
rmb();
prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
}
*/
bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
sw_cons++;
}
@ -749,7 +743,8 @@ void bnx2x_link_report(struct bnx2x *bp)
u16 vn_max_rate;
vn_max_rate =
((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
((bp->mf_config[BP_VN(bp)] &
FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
@ -912,6 +907,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
if (j != 0)
continue;
if (!CHIP_IS_E2(bp)) {
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
U64_LO(fp->rx_comp_mapping));
@ -919,7 +915,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
U64_HI(fp->rx_comp_mapping));
}
}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
@ -1308,23 +1304,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
} else {
int path = BP_PATH(bp);
int port = BP_PORT(bp);
DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
load_count[0]++;
load_count[1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
if (load_count[0] == 1)
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]++;
load_count[path][1 + port]++;
DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
else if (load_count[1 + port] == 1)
else if (load_count[path][1 + port] == 1)
load_code = FW_MSG_CODE_DRV_LOAD_PORT;
else
load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
(load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
(load_code == FW_MSG_CODE_DRV_LOAD_PORT))
bp->port.pmf = 1;
else
@ -1349,7 +1349,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Setup NIC internals and enable interrupts */
bnx2x_nic_init(bp, load_code);
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
(load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
(bp->common.shmem2_base))
SHMEM2_WR(bp, dcc_support,
(SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
@ -1389,8 +1390,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#endif
}
if (CHIP_IS_E1H(bp))
if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
if (!CHIP_IS_E1(bp) &&
(bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
bp->flags |= MF_FUNC_DIS;
}
@ -1527,8 +1528,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_set_storm_rx_mode(bp);
/* Stop Tx */
bnx2x_tx_disable(bp);
del_timer_sync(&bp->timer);
SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@ -1855,6 +1858,120 @@ exit_lbl:
}
#endif
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
struct eth_tx_parse_bd_e2 *pbd,
u32 xmit_type)
{
pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
if ((xmit_type & XMIT_GSO_V6) &&
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
* Update PBD in GSO case.
*
* @param skb
* @param tx_start_bd
* @param pbd
* @param xmit_type
*/
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = swab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum =
swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
} else
pbd->tcp_pseudo_csum =
swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * Set the E2 parsing BD fields for checksum offload.
 *
 * @param skb
 * @param tx_start_bd
 * @param pbd_e2
 * @param xmit_type
 *
 * @return header len
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e2 *pbd,
u32 xmit_type)
{
pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
skb->data) / 2) <<
ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
/**
 * Set the E1x parsing BD fields for checksum offload.
 *
 * @param skb
 * @param tx_start_bd
 * @param pbd
 * @param xmit_type
 *
 * @return Header length
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) / 2;
/* for now NS flag is not used in Linux */
pbd->global_data =
(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) / 2;
hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
pbd->total_hlen_w = cpu_to_le16(hlen);
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) {
pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
} else {
s8 fix = SKB_CS_OFF(skb); /* signed! */
DP(NETIF_MSG_TX_QUEUED,
"hlen %d fix %d csum before fix %x\n",
le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
/* HW bug: fixup the CSUM */
pbd->tcp_pseudo_csum =
bnx2x_csum_fix(skb_transport_header(skb),
SKB_CS(skb), fix);
DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
pbd->tcp_pseudo_csum);
}
return hlen;
}
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@ -1868,6 +1985,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
@ -1895,9 +2013,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
" gso type %x xmit_type %x\n",
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
"protocol(%x,%x) gso type %x xmit_type %x\n",
fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
eth = (struct ethhdr *)skb->data;
@ -1988,44 +2106,21 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |=
ETH_TX_BD_FLAGS_IS_UDP;
}
if (CHIP_IS_E2(bp)) {
pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
hlen = bnx2x_set_pbd_csum_e2(bp,
skb, pbd_e2, xmit_type);
} else {
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM) {
hlen = (skb_network_header(skb) - skb->data) / 2;
if (xmit_type & XMIT_CSUM)
hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
/* for now NS flag is not used in Linux */
pbd_e1x->global_data =
(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd_e1x->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) / 2;
hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2;
pbd_e1x->total_hlen_w = cpu_to_le16(hlen);
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) {
pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
} else {
s8 fix = SKB_CS_OFF(skb); /* signed! */
DP(NETIF_MSG_TX_QUEUED,
"hlen %d fix %d csum before fix %x\n",
le16_to_cpu(pbd_e1x->total_hlen_w),
fix, SKB_CS(skb));
/* HW bug: fixup the CSUM */
pbd_e1x->tcp_pseudo_csum =
bnx2x_csum_fix(skb_transport_header(skb),
SKB_CS(skb), fix);
DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
pbd_e1x->tcp_pseudo_csum);
}
}
mapping = dma_map_single(&bp->pdev->dev, skb->data,
@ -2057,26 +2152,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_headlen(skb) > hlen))
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
pbd_e1x->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) {
pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
pbd_e1x->tcp_pseudo_csum =
swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
} else
pbd_e1x->tcp_pseudo_csum =
swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
pbd_e1x->global_data |=
ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
if (CHIP_IS_E2(bp))
bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
else
bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
@ -2124,7 +2203,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
le16_to_cpu(pbd_e1x->total_hlen_w));
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
/*
@ -2327,6 +2412,8 @@ int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
/* Since the chip was reset, clear the FW sequence number */
bp->fw_seq = 0;
rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();


@ -366,9 +366,76 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
u8 segment, u16 index, u8 op,
u8 update, u32 igu_addr)
{
struct igu_regular cmd_data = {0};
cmd_data.sb_id_and_flags =
((index << IGU_REGULAR_SB_INDEX_SHIFT) |
(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
(update << IGU_REGULAR_BUPDATE_SHIFT) |
(op << IGU_REGULAR_ENABLE_INT_SHIFT));
DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr);
REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
/* Make sure that ACK is written */
mmiowb();
barrier();
}
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
u8 idu_sb_id, bool is_Pf)
{
u32 data, ctl, cnt = 100;
u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
u32 func_encode = BP_FUNC(bp) |
((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
if (CHIP_INT_MODE_IS_BC(bp))
return;
data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
IGU_REGULAR_CLEANUP_SET |
IGU_REGULAR_BCLEANUP;
ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
func_encode << IGU_CTRL_REG_FID_SHIFT |
IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
data, igu_addr_data);
REG_WR(bp, igu_addr_data, data);
mmiowb();
barrier();
DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
ctl, igu_addr_ctl);
REG_WR(bp, igu_addr_ctl, ctl);
mmiowb();
barrier();
/* wait for clean up to finish */
while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
msleep(20);
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
"idu_sb_id %d offset %d bit %d (cnt %d)\n",
idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
}
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
@ -390,7 +457,37 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
mmiowb();
barrier();
}
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
u16 index, u8 op, u8 update)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
else {
u8 segment;
if (CHIP_INT_MODE_IS_BC(bp))
segment = storm;
else if (igu_sb_id != bp->igu_dsb_id)
segment = IGU_SEG_ACCESS_DEF;
else if (storm == ATTENTION_ID)
segment = IGU_SEG_ACCESS_ATTN;
else
segment = IGU_SEG_ACCESS_DEF;
bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
}
}
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
COMMAND_REG_SIMD_MASK);
@ -399,13 +496,34 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
result, hc_addr);
barrier();
return result;
}
static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
u32 result = REG_RD(bp, igu_addr);
DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
result, igu_addr);
barrier();
return result;
}
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
barrier();
if (bp->common.int_block == INT_BLOCK_HC)
return bnx2x_hc_ack_int(bp);
else
return bnx2x_igu_ack_int(bp);
}
/*
* fast path service functions
*/
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
/* Tell compiler that consumer and producer can change */
@ -456,6 +574,17 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
rx_cons_sb++;
return (fp->rx_comp_cons != rx_cons_sb);
}
/**
* Disables tx from the stack's point of view
*
* @param bp
*/
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
netif_tx_disable(bp->dev);
netif_carrier_off(bp->dev);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
struct bnx2x_fastpath *fp, u16 index)
{


@ -31,14 +31,24 @@ struct dump_sign {
#define RI_E1 0x1
#define RI_E1H 0x2
#define RI_E2 0x4
#define RI_ONLINE 0x100
#define RI_PATH0_DUMP 0x200
#define RI_PATH1_DUMP 0x400
#define RI_E1_OFFLINE (RI_E1)
#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
#define RI_E1H_OFFLINE (RI_E1H)
#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
#define RI_ALL_OFFLINE (RI_E1 | RI_E1H)
#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
#define RI_E2_OFFLINE (RI_E2)
#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
#define MAX_TIMER_PENDING 200
#define TIMER_SCAN_DONT_CARE 0xFF
@ -513,6 +523,12 @@ static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
{ 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
};
#define WREGS_COUNT_E2 1
static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
};
static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
@ -531,4 +547,17 @@ static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
{ 0x1640d0, 0x1640d4 };
#define PAGE_MODE_VALUES_E2 2
#define PAGE_READ_REGS_E2 1
#define PAGE_WRITE_REGS_E2 1
static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
{ 0x58000, 4608, RI_E2_ONLINE } };
#endif /* BNX2X_DUMP_H */


@ -41,19 +41,19 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(bp->link_vars.link_up)) {
cmd->speed = bp->link_vars.line_speed;
cmd->duplex = bp->link_vars.duplex;
if (IS_MF(bp)) {
u16 vn_max_rate;
vn_max_rate =
((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
if (vn_max_rate < cmd->speed)
cmd->speed = vn_max_rate;
}
} else {
cmd->speed = bp->link_params.req_line_speed[cfg_idx];
cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
if (IS_MF(bp)) {
u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
100;
if (vn_max_rate < cmd->speed)
cmd->speed = vn_max_rate;
}
if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
@ -298,6 +298,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
static int bnx2x_get_regs_len(struct net_device *dev)
{
@ -315,7 +316,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
regdump_len += wreg_addrs_e1[i].size *
(1 + wreg_addrs_e1[i].read_regs_count);
} else { /* E1H */
} else if (CHIP_IS_E1H(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E1H_ONLINE(reg_addrs[i].info))
regdump_len += reg_addrs[i].size;
@ -324,6 +325,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
regdump_len += wreg_addrs_e1h[i].size *
(1 + wreg_addrs_e1h[i].read_regs_count);
} else if (CHIP_IS_E2(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E2_ONLINE(reg_addrs[i].info))
regdump_len += reg_addrs[i].size;
for (i = 0; i < WREGS_COUNT_E2; i++)
if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
regdump_len += wreg_addrs_e2[i].size *
(1 + wreg_addrs_e2[i].read_regs_count);
}
regdump_len *= 4;
regdump_len += sizeof(struct dump_hdr);
@ -331,6 +341,23 @@ static int bnx2x_get_regs_len(struct net_device *dev)
return regdump_len;
}
static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
{
u32 i, j, k, n;
for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
for (k = 0; k < PAGE_READ_REGS_E2; k++)
if (IS_E2_ONLINE(page_read_regs_e2[k].info))
for (n = 0; n <
page_read_regs_e2[k].size; n++)
*p++ = REG_RD(bp,
page_read_regs_e2[k].addr + n*4);
}
}
}
static void bnx2x_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *_p)
{
@ -350,7 +377,14 @@ static void bnx2x_get_regs(struct net_device *dev,
dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
if (CHIP_IS_E1(bp))
dump_hdr.info = RI_E1_ONLINE;
else if (CHIP_IS_E1H(bp))
dump_hdr.info = RI_E1H_ONLINE;
else if (CHIP_IS_E2(bp))
dump_hdr.info = RI_E2_ONLINE |
(BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
p += dump_hdr.hdr_size + 1;
@ -362,16 +396,25 @@ static void bnx2x_get_regs(struct net_device *dev,
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
} else { /* E1H */
} else if (CHIP_IS_E1H(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E1H_ONLINE(reg_addrs[i].info))
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
} else if (CHIP_IS_E2(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E2_ONLINE(reg_addrs[i].info))
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
bnx2x_read_pages_regs_e2(bp, p);
}
}
#define PHY_FW_VER_LEN 10
#define PHY_FW_VER_LEN 20
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@ -474,7 +517,7 @@ static u32 bnx2x_get_link(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
if (bp->flags & MF_FUNC_DIS)
if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
return 0;
return bp->link_vars.link_up;
@ -1235,6 +1278,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
u32 offset, mask, save_val, val;
if (CHIP_IS_E2(bp) &&
reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
continue;
offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
mask = reg_tbl[i].mask;
@ -1286,20 +1332,33 @@ static int bnx2x_test_memory(struct bnx2x *bp)
u32 offset;
u32 e1_mask;
u32 e1h_mask;
u32 e2_mask;
} prty_tbl[] = {
{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
{ NULL, 0xffffffff, 0, 0 }
{ NULL, 0xffffffff, 0, 0, 0 }
};
if (!netif_running(bp->dev))
return rc;
/* pre-Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
(CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
(CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
}
}
/* Go through all the memories */
for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
for (j = 0; j < mem_tbl[i].size; j++)
@ -1309,7 +1368,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
(CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
(CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
(CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@ -1324,7 +1384,7 @@ test_mem_exit:
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
int cnt = 1000;
int cnt = 1400;
if (link_up)
while (bnx2x_link_test(bp, is_serdes) && cnt--)
@ -1344,6 +1404,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
dma_addr_t mapping;
union eth_rx_cqe *cqe;
u8 cqe_fp_flags;
@ -1411,7 +1472,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
wmb();
@ -1431,6 +1494,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
if (tx_idx != tx_start_idx + num_pkts)
goto test_loopback_exit;
/* Unlike HC, the IGU won't generate an interrupt for status block
* updates that have been performed while interrupts were
* disabled.
*/
if (bp->common.int_block == INT_BLOCK_IGU)
bnx2x_tx_int(fp_tx);
rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
if (rx_idx != rx_start_idx + num_pkts)
goto test_loopback_exit;
@ -1573,8 +1643,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.length = 0;
if (CHIP_IS_E1(bp))
/* use last unicast entries */
config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
else
config->hdr.offset = BP_FUNC(bp);
config->hdr.client_id = bp->fp->cl_id;


@ -663,6 +663,7 @@ struct shm_dev_info { /* size */
#define FUNC_7 7
#define E1_FUNC_MAX 2
#define E1H_FUNC_MAX 8
#define E2_FUNC_MAX 4 /* per path */
#define VN_0 0
#define VN_1 1
@ -821,6 +822,9 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
/* Load common chip is supported from bc 6.0.0 */
#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
@ -1026,7 +1030,17 @@ struct shmem_region { /* SharedMem Offset (size) */
}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
struct fw_flr_ack {
u32 pf_ack;
u32 vf_ack[1];
u32 iov_dis_ack;
};
struct fw_flr_mb {
u32 aggint;
u32 opgen_addr;
struct fw_flr_ack ack;
};
struct shmem2_region {
@ -1047,6 +1061,19 @@ struct shmem2_region {
* (the size field is smaller than 0xc) the mf_cfg resides at the
* end of struct shmem_region
*/
u32 mf_cfg_addr;
#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
struct fw_flr_mb flr_mb;
u32 reserved[3];
/*
* The other shmemX_base_addr holds the other path's shmem address
* required for example in case of common phy init, or for path1 to know
* the address of the mcp debug trace, which is located at an offset from the shmem
* of path0
*/
u32 other_shmem_base_addr;
u32 other_shmem2_base_addr;
};
@ -1206,10 +1233,126 @@ struct bmac1_stats {
u32 rx_stat_gripj_hi;
};
struct bmac2_stats {
u32 tx_stat_gtpk_lo; /* gtpok */
u32 tx_stat_gtpk_hi; /* gtpok */
u32 tx_stat_gtxpf_lo; /* gtpf */
u32 tx_stat_gtxpf_hi; /* gtpf */
u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
u32 tx_stat_gtfcs_lo;
u32 tx_stat_gtfcs_hi;
u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
u32 tx_stat_gtmca_lo;
u32 tx_stat_gtmca_hi;
u32 tx_stat_gtbca_lo;
u32 tx_stat_gtbca_hi;
u32 tx_stat_gtovr_lo;
u32 tx_stat_gtovr_hi;
u32 tx_stat_gtfrg_lo;
u32 tx_stat_gtfrg_hi;
u32 tx_stat_gtpkt1_lo; /* gtpkt */
u32 tx_stat_gtpkt1_hi; /* gtpkt */
u32 tx_stat_gt64_lo;
u32 tx_stat_gt64_hi;
u32 tx_stat_gt127_lo;
u32 tx_stat_gt127_hi;
u32 tx_stat_gt255_lo;
u32 tx_stat_gt255_hi;
u32 tx_stat_gt511_lo;
u32 tx_stat_gt511_hi;
u32 tx_stat_gt1023_lo;
u32 tx_stat_gt1023_hi;
u32 tx_stat_gt1518_lo;
u32 tx_stat_gt1518_hi;
u32 tx_stat_gt2047_lo;
u32 tx_stat_gt2047_hi;
u32 tx_stat_gt4095_lo;
u32 tx_stat_gt4095_hi;
u32 tx_stat_gt9216_lo;
u32 tx_stat_gt9216_hi;
u32 tx_stat_gt16383_lo;
u32 tx_stat_gt16383_hi;
u32 tx_stat_gtmax_lo;
u32 tx_stat_gtmax_hi;
u32 tx_stat_gtufl_lo;
u32 tx_stat_gtufl_hi;
u32 tx_stat_gterr_lo;
u32 tx_stat_gterr_hi;
u32 tx_stat_gtbyt_lo;
u32 tx_stat_gtbyt_hi;
u32 rx_stat_gr64_lo;
u32 rx_stat_gr64_hi;
u32 rx_stat_gr127_lo;
u32 rx_stat_gr127_hi;
u32 rx_stat_gr255_lo;
u32 rx_stat_gr255_hi;
u32 rx_stat_gr511_lo;
u32 rx_stat_gr511_hi;
u32 rx_stat_gr1023_lo;
u32 rx_stat_gr1023_hi;
u32 rx_stat_gr1518_lo;
u32 rx_stat_gr1518_hi;
u32 rx_stat_gr2047_lo;
u32 rx_stat_gr2047_hi;
u32 rx_stat_gr4095_lo;
u32 rx_stat_gr4095_hi;
u32 rx_stat_gr9216_lo;
u32 rx_stat_gr9216_hi;
u32 rx_stat_gr16383_lo;
u32 rx_stat_gr16383_hi;
u32 rx_stat_grmax_lo;
u32 rx_stat_grmax_hi;
u32 rx_stat_grpkt_lo;
u32 rx_stat_grpkt_hi;
u32 rx_stat_grfcs_lo;
u32 rx_stat_grfcs_hi;
u32 rx_stat_gruca_lo;
u32 rx_stat_gruca_hi;
u32 rx_stat_grmca_lo;
u32 rx_stat_grmca_hi;
u32 rx_stat_grbca_lo;
u32 rx_stat_grbca_hi;
u32 rx_stat_grxpf_lo; /* grpf */
u32 rx_stat_grxpf_hi; /* grpf */
u32 rx_stat_grpp_lo;
u32 rx_stat_grpp_hi;
u32 rx_stat_grxuo_lo; /* gruo */
u32 rx_stat_grxuo_hi; /* gruo */
u32 rx_stat_grjbr_lo;
u32 rx_stat_grjbr_hi;
u32 rx_stat_grovr_lo;
u32 rx_stat_grovr_hi;
u32 rx_stat_grxcf_lo; /* grcf */
u32 rx_stat_grxcf_hi; /* grcf */
u32 rx_stat_grflr_lo;
u32 rx_stat_grflr_hi;
u32 rx_stat_grpok_lo;
u32 rx_stat_grpok_hi;
u32 rx_stat_grmeg_lo;
u32 rx_stat_grmeg_hi;
u32 rx_stat_grmeb_lo;
u32 rx_stat_grmeb_hi;
u32 rx_stat_grbyt_lo;
u32 rx_stat_grbyt_hi;
u32 rx_stat_grund_lo;
u32 rx_stat_grund_hi;
u32 rx_stat_grfrg_lo;
u32 rx_stat_grfrg_hi;
u32 rx_stat_grerb_lo; /* grerrbyt */
u32 rx_stat_grerb_hi; /* grerrbyt */
u32 rx_stat_grfre_lo; /* grfrerr */
u32 rx_stat_grfre_hi; /* grfrerr */
u32 rx_stat_gripj_lo;
u32 rx_stat_gripj_hi;
};
union mac_stats {
struct emac_stats emac_stats;
struct bmac1_stats bmac1_stats;
struct bmac2_stats bmac2_stats;
};
@ -1593,6 +1736,24 @@ union igu_consprod_reg {
};
/*
* Control register for the IGU command register
*/
struct igu_ctrl_reg {
u32 ctrl_data;
#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
#define IGU_CTRL_REG_ADDRESS_SHIFT 0
#define IGU_CTRL_REG_FID (0x7F<<12)
#define IGU_CTRL_REG_FID_SHIFT 12
#define IGU_CTRL_REG_RESERVED (0x1<<19)
#define IGU_CTRL_REG_RESERVED_SHIFT 19
#define IGU_CTRL_REG_TYPE (0x1<<20)
#define IGU_CTRL_REG_TYPE_SHIFT 20
#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
#define IGU_CTRL_REG_UNUSED_SHIFT 21
};
/*
* Parser parsing flags field
*/
@ -1923,6 +2084,27 @@ struct eth_tx_parse_bd_e1x {
__le32 tcp_send_seq;
};
/*
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
__le16 dst_mac_addr_lo;
__le16 dst_mac_addr_mid;
__le16 dst_mac_addr_hi;
__le16 src_mac_addr_lo;
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le32 parsing_data;
#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
};
/*
* The last BD in the BD memory will hold a pointer to the next BD memory
*/
@ -1939,6 +2121,7 @@ union eth_tx_bd_types {
struct eth_tx_start_bd start_bd;
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
struct eth_tx_next_bd next_bd;
};


@ -97,6 +97,9 @@
#define MISC_AEU_BLOCK 35
#define PGLUE_B_BLOCK 36
#define IGU_BLOCK 37
#define ATC_BLOCK 38
#define QM_4PORT_BLOCK 39
#define XSEM_4PORT_BLOCK 40
/* Returns the index of start or end of a specific block stage in ops array*/


@ -486,18 +486,30 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
if (r_order == MAX_RD_ORD)
if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
if (CHIP_IS_E2(bp))
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
else
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
if (CHIP_IS_E1H(bp)) {
if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
/* MPS      w_order     optimal TH      presently TH
 *  128        0             0               2
 *  256        1             1               3
 * >=512       2             2               3
 */
/* DMAE is special */
if (CHIP_IS_E2(bp)) {
/* E2 can use optimal TH */
val = w_order;
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
} else {
val = ((w_order == 0) ? 2 : 3);
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
}
REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
@ -507,9 +519,15 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
}
/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
val &= 0xFF;
if (val <= 0x20)
REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
/****************************************************************************


@ -377,9 +377,60 @@ static u8 bnx2x_emac_enable(struct link_params *params,
return 0;
}
static void bnx2x_update_bmac2(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
/*
* Set rx control: Strip CRC and enable BigMAC to relay
* control packets to the system as well
*/
u32 wb_data[2];
struct bnx2x *bp = params->bp;
u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
/* Enable BigMAC to react on received Pause packets */
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
wb_data, 2);
udelay(30);
/* Tx control */
val = 0xc0;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
wb_data, 2);
val = 0x8000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
wb_data, 2);
/* mac control */
val = 0x3; /* Enable RX and TX */
if (is_lb) {
val |= 0x4; /* Local loopback */
DP(NETIF_MSG_LINK, "enable bmac loopback\n");
}
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
struct bnx2x *bp = params->bp;
@ -389,17 +440,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
u32 wb_data[2];
u32 val;
DP(NETIF_MSG_LINK, "Enabling BigMAC\n");
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
/* XGXS control */
wb_data[0] = 0x3c;
@ -479,6 +520,103 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
wb_data, 2);
}
return 0;
}
static u8 bnx2x_bmac2_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
wb_data[0] = 0;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
wb_data, 2);
udelay(30);
/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
wb_data[0] = 0x3c;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr +
BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
udelay(30);
/* tx MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
wb_data, 2);
udelay(30);
/* Configure SAFC */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
udelay(30);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
wb_data, 2);
udelay(30);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
wb_data, 2);
udelay(30);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
wb_data, 2);
udelay(30);
bnx2x_update_bmac2(params, vars, is_lb);
return 0;
}
u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
u8 rc, port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
udelay(10);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type*/
if (CHIP_IS_E2(bp))
rc = bnx2x_bmac2_enable(params, vars, is_lb);
else
rc = bnx2x_bmac1_enable(params, vars, is_lb);
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
@ -493,7 +631,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
vars->mac_type = MAC_TYPE_BMAC;
return 0;
return rc;
}
@ -519,13 +657,25 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
nig_bmac_enable) {
if (CHIP_IS_E2(bp)) {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
REG_RD_DMAE(bp, bmac_addr +
BIGMAC2_REGISTER_BMAC_CONTROL,
wb_data, 2);
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
REG_WR_DMAE(bp, bmac_addr +
BIGMAC2_REGISTER_BMAC_CONTROL,
wb_data, 2);
} else {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr +
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
msleep(1);
}
}
@ -821,23 +971,31 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
return -EINVAL;
}
static void bnx2x_set_aer_mmd(struct link_params *params,
static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
u32 ser_lane;
u16 offset;
u16 offset, aer_val;
struct bnx2x *bp = params->bp;
ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
(phy->addr + ser_lane) : 0;
offset = phy->addr + ser_lane;
if (CHIP_IS_E2(bp))
aer_val = 0x2800 + offset - 1;
else
aer_val = 0x3800 + offset;
CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
MDIO_AER_BLOCK_AER_REG, aer_val);
}
static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0x3800);
}
/******************************************************************/
@ -2046,12 +2204,12 @@ static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
u8 rc;
vars->phy_flags |= PHY_SGMII_FLAG;
bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
bnx2x_set_aer_mmd(params, phy);
bnx2x_set_aer_mmd_serdes(params->bp, phy);
rc = bnx2x_reset_unicore(params, phy, 1);
/* reset the SerDes and wait for reset bit return low */
if (rc != 0)
return rc;
bnx2x_set_aer_mmd(params, phy);
bnx2x_set_aer_mmd_serdes(params->bp, phy);
return rc;
}
@ -2076,7 +2234,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
vars->phy_flags &= ~PHY_SGMII_FLAG;
bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
bnx2x_set_aer_mmd(params, phy);
bnx2x_set_aer_mmd_xgxs(params, phy);
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
@ -2084,7 +2242,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
if (rc != 0)
return rc;
bnx2x_set_aer_mmd(params, phy);
bnx2x_set_aer_mmd_xgxs(params, phy);
/* setting the masterLn_def again after the reset */
bnx2x_set_master_ln(params, phy);
@ -2358,7 +2516,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
0x6041);
msleep(200);
/* set aer mmd back */
bnx2x_set_aer_mmd(params, phy);
bnx2x_set_aer_mmd_xgxs(params, phy);
/* and md_devad */
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
@ -2721,6 +2879,9 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u8 gpio_port;
/* HW reset */
if (CHIP_IS_E2(bp))
gpio_port = BP_PATH(bp);
else
gpio_port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
@ -2799,6 +2960,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
}
/* PBF - link up */
if (!(CHIP_IS_E2(bp)))
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
@ -3443,6 +3605,9 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
u8 gpio_port;
DP(NETIF_MSG_LINK, "Init 8073\n");
if (CHIP_IS_E2(bp))
gpio_port = BP_PATH(bp);
else
gpio_port = params->port;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
@ -3680,6 +3845,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u8 gpio_port;
if (CHIP_IS_E2(bp))
gpio_port = BP_PATH(bp);
else
gpio_port = params->port;
DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
gpio_port);
@ -6371,6 +6539,9 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->mdio_ctrl = bnx2x_get_emac_base(bp,
SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
port);
if (CHIP_IS_E2(bp))
phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
else
phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
@ -6742,7 +6913,9 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
bnx2x_emac_enable(params, vars, 0);
bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
if (!(CHIP_IS_E2(bp)))
bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
/* disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@ -6932,18 +7105,34 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
/****************************************************************************/
/* Common function */
/****************************************************************************/
static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 phy_index)
static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
s8 port;
s8 port_of_path = 0;
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
/* In E2, the same phy is used for port0 of the two paths */
if (CHIP_IS_E2(bp)) {
shmem_base = shmem_base_path[port];
shmem2_base = shmem2_base_path[port];
port_of_path = 0;
} else {
shmem_base = shmem_base_path[0];
shmem2_base = shmem2_base_path[0];
port_of_path = port;
}
/* Extract the ext phy address for the port */
if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
port, &phy[port]) !=
port_of_path, &phy[port]) !=
0) {
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
@ -6981,9 +7170,15 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u16 fw_ver1;
if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
port);
port_of_path);
bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
@ -7039,9 +7234,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem
}
return 0;
}
static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 phy_index)
static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
u32 val;
s8 port;
@ -7056,6 +7252,16 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
bnx2x_ext_phy_hw_reset(bp, 1);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
/* In E2, the same phy is used for port0 of the two paths */
if (CHIP_IS_E2(bp)) {
shmem_base = shmem_base_path[port];
shmem2_base = shmem2_base_path[port];
} else {
shmem_base = shmem_base_path[0];
shmem2_base = shmem2_base_path[0];
}
/* Extract the ext phy address for the port */
if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
port, &phy) !=
@ -7077,14 +7283,16 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
return 0;
}
static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 phy_index)
static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
s8 port;
u32 swap_val, swap_override;
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
s8 port_of_path;
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
@ -7099,15 +7307,29 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
/* In E2, the same phy is used for port0 of the two paths */
if (CHIP_IS_E2(bp)) {
shmem_base = shmem_base_path[port];
shmem2_base = shmem2_base_path[port];
port_of_path = 0;
} else {
shmem_base = shmem_base_path[0];
shmem2_base = shmem2_base_path[0];
port_of_path = port;
}
/* Extract the ext phy address for the port */
if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
port, &phy[port]) !=
port_of_path, &phy[port]) !=
0) {
DP(NETIF_MSG_LINK, "populate phy failed\n");
return -EINVAL;
}
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@ -7133,9 +7355,14 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u16 fw_ver1;
if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
port);
port_of_path);
bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
@ -7151,29 +7378,32 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
return 0;
}
static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 phy_index,
u32 ext_phy_type)
static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
{
u8 rc = 0;
switch (ext_phy_type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
rc = bnx2x_8073_common_init_phy(bp, shmem_base,
shmem2_base, phy_index);
rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
rc = bnx2x_8727_common_init_phy(bp, shmem_base,
shmem2_base, phy_index);
rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
/* GPIO1 affects both ports, so there's a need to pull
it for a single port alone */
rc = bnx2x_8726_common_init_phy(bp, shmem_base,
shmem2_base, phy_index);
rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = -EINVAL;
@ -7188,8 +7418,8 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
return rc;
}
u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base)
u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id)
{
u8 rc = 0;
u8 phy_index;
@ -7203,12 +7433,13 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
phy_index++) {
ext_phy_config = bnx2x_get_ext_phy_config(bp,
shmem_base,
shmem_base_path[0],
phy_index, 0);
ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
rc |= bnx2x_ext_phy_common_init(bp, shmem_base,
shmem2_base,
phy_index, ext_phy_type);
rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
shmem2_base_path,
phy_index, ext_phy_type,
chip_id);
}
return rc;
}

View File

@ -23,6 +23,7 @@
/* Defines */
/***********************************************************/
#define DEFAULT_PHY_DEV_ADDR 3
#define E2_DEFAULT_PHY_DEV_ADDR 5
@ -315,7 +316,8 @@ u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
u8 is_serdes);
/* One-time initialization for external phy after power up */
u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base);
u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id);
/* Reset the external PHY using GPIO */
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -185,20 +185,12 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
/* loader */
if (bp->executer_idx) {
int loader_idx = PMF_DMAE_C(bp);
u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
true, DMAE_COMP_GRC);
opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
memset(dmae, 0, sizeof(struct dmae_command));
dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
DMAE_CMD_PORT_0) |
(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
dmae->opcode = opcode;
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
@ -257,19 +249,10 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
bp->executer_idx = 0;
opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
dmae->src_addr_lo = bp->port.port_stx >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
@ -280,7 +263,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
dmae->comp_val = 1;
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
@ -301,7 +284,6 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
{
struct dmae_command *dmae;
int port = BP_PORT(bp);
int vn = BP_E1HVN(bp);
u32 opcode;
int loader_idx = PMF_DMAE_C(bp);
u32 mac_addr;
@ -316,16 +298,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
bp->executer_idx = 0;
/* MCP */
opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(vn << DMAE_CMD_E1HVN_SHIFT));
opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
true, DMAE_COMP_GRC);
if (bp->port.port_stx) {
@ -356,16 +330,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
}
/* MAC */
opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(vn << DMAE_CMD_E1HVN_SHIFT));
opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
true, DMAE_COMP_GRC);
if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
@ -376,13 +342,21 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
BIGMAC_REGISTER_TX_STAT_GTBYT */
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = opcode;
if (CHIP_IS_E1x(bp)) {
dmae->src_addr_lo = (mac_addr +
BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
} else {
dmae->src_addr_lo = (mac_addr +
BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
}
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@ -391,15 +365,31 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
BIGMAC_REGISTER_RX_STAT_GRIPJ */
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = opcode;
dmae->src_addr_hi = 0;
if (CHIP_IS_E1x(bp)) {
dmae->src_addr_lo = (mac_addr +
BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
dmae->dst_addr_lo =
U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
offsetof(struct bmac1_stats, rx_stat_gr64_lo));
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
dmae->dst_addr_hi =
U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
offsetof(struct bmac1_stats, rx_stat_gr64_lo));
dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
} else {
dmae->src_addr_lo =
(mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
dmae->dst_addr_lo =
U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
offsetof(struct bmac2_stats, rx_stat_gr64_lo));
dmae->dst_addr_hi =
U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
offsetof(struct bmac2_stats, rx_stat_gr64_lo));
dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
}
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@ -480,16 +470,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->comp_val = 1;
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(vn << DMAE_CMD_E1HVN_SHIFT));
dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
true, DMAE_COMP_PCI);
dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
dmae->src_addr_hi = 0;
@ -519,16 +501,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
bp->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
true, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
dmae->dst_addr_lo = bp->func_stx >> 2;
@ -568,7 +542,6 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct {
@ -576,6 +549,10 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
u32 hi;
} diff;
if (CHIP_IS_E1x(bp)) {
struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
/* the macros below will use "bmac1_stats" type */
UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
@ -606,6 +583,41 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
} else {
struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
/* the macros below will use "bmac2_stats" type */
UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
UPDATE_STAT64(tx_stat_gt127,
tx_stat_etherstatspkts65octetsto127octets);
UPDATE_STAT64(tx_stat_gt255,
tx_stat_etherstatspkts128octetsto255octets);
UPDATE_STAT64(tx_stat_gt511,
tx_stat_etherstatspkts256octetsto511octets);
UPDATE_STAT64(tx_stat_gt1023,
tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}
estats->pause_frames_received_hi =
pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
estats->pause_frames_received_lo =
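
The hunk above splits bnx2x_bmac_stats_update() so that the same set of UPDATE_STAT64 updates is applied either to a bmac1_stats view (E1x) or a bmac2_stats view (E2) of the MAC statistics block. A minimal standalone model of that shape, with trimmed placeholder structs and a simplified update macro (the real layouts and macro live in the driver headers), might look like this:

/* Standalone model of the E1x/E2 statistics-struct dispatch. */
#include <stdint.h>
#include <stdio.h>

struct demo_bmac1_stats { uint64_t rx_grerb; uint64_t tx_gtufl; };
struct demo_bmac2_stats { uint64_t rx_grerb; uint64_t tx_gtufl; };

struct demo_eth_stats { uint64_t rx_bad_octets; uint64_t tx_underflows; };

/* simplified stand-in for UPDATE_STAT64: accumulate hw counter into estats */
#define DEMO_UPDATE(estats, new, src, dst)  ((estats)->dst += (new)->src)

static void demo_bmac_stats_update(int is_e1x, const void *raw,
				   struct demo_eth_stats *estats)
{
	if (is_e1x) {
		const struct demo_bmac1_stats *new = raw;
		DEMO_UPDATE(estats, new, rx_grerb, rx_bad_octets);
		DEMO_UPDATE(estats, new, tx_gtufl, tx_underflows);
	} else {
		const struct demo_bmac2_stats *new = raw;
		DEMO_UPDATE(estats, new, rx_grerb, rx_bad_octets);
		DEMO_UPDATE(estats, new, tx_gtufl, tx_underflows);
	}
}

int main(void)
{
	struct demo_bmac1_stats hw = { .rx_grerb = 3, .tx_gtufl = 1 };
	struct demo_eth_stats est = { 0 };

	demo_bmac_stats_update(1, &hw, &est);
	printf("bad octets: %llu\n", (unsigned long long)est.rx_bad_octets);
	return 0;
}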
@ -1121,24 +1133,17 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
bp->executer_idx = 0;
opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
if (bp->port.port_stx) {
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
if (bp->func_stx)
dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
dmae->opcode = bnx2x_dmae_opcode_add_comp(
opcode, DMAE_COMP_GRC);
else
dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
dmae->opcode = bnx2x_dmae_opcode_add_comp(
opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
@ -1162,7 +1167,8 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
if (bp->func_stx) {
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
dmae->opcode =
bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
dmae->dst_addr_lo = bp->func_stx >> 2;
@ -1255,16 +1261,8 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bp->executer_idx = 0;
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
true, DMAE_COMP_PCI);
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
@ -1282,8 +1280,6 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
int port = BP_PORT(bp);
int func;
u32 func_stx;
/* sanity */
@ -1296,9 +1292,9 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
func_stx = bp->func_stx;
for (vn = VN_0; vn < vn_max; vn++) {
func = 2*vn + port;
int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
bnx2x_func_stats_init(bp);
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
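
The change above replaces the fixed func = 2*vn + port mailbox index with a chip-dependent one: E1x keeps 2*vn + port, while E2 indexes the per-function firmware mailbox by VN alone (the rule the BP_FW_MB_IDX() macro used later in this file presumably encodes). A tiny standalone model of that selection, with demo_fw_mb_idx as an illustrative name:

/* Standalone model: firmware mailbox index per chip family.
 * E1x: one mailbox per (vn, port) pair; E2: one per VN. */
#include <stdio.h>

static int demo_fw_mb_idx(int is_e2, int vn, int port)
{
	return is_e2 ? vn : 2 * vn + port;
}

int main(void)
{
	printf("E1x vn1/port1 -> %d, E2 vn1 -> %d\n",
	       demo_fw_mb_idx(0, 1, 1), demo_fw_mb_idx(1, 1, 0));
	return 0;
}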
@ -1322,16 +1318,8 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
bp->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
true, DMAE_COMP_PCI);
dmae->src_addr_lo = bp->func_stx >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
@ -1349,7 +1337,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
void bnx2x_stats_init(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
int mb_idx = BP_FW_MB_IDX(bp);
int i;
bp->stats_pending = 0;
@ -1359,7 +1347,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* port and func stats for management */
if (!BP_NOMCP(bp)) {
bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
} else {
bp->port.port_stx = 0;

View File

@ -33,7 +33,8 @@ fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \
fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.0.34.0.fw \
bnx2x/bnx2x-e1h-6.0.34.0.fw
bnx2x/bnx2x-e1h-6.0.34.0.fw \
bnx2x/bnx2x-e2-6.0.34.0.fw
fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-5.0.0.j15.fw \
bnx2/bnx2-rv2p-09-5.0.0.j10.fw \
bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw \