Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (46 commits)
  [NET] ifb: set separate lockdep classes for queue locks
  [IPV6] KCONFIG: Fix description about IPV6_TUNNEL.
  [TCP]: Fix shrinking windows with window scaling
  netpoll: zap_completion_queue: adjust skb->users counter
  bridge: use time_before() in br_fdb_cleanup()
  [TG3]: Fix build warning on sparc32.
  MAINTAINERS: bluez-devel is subscribers-only
  audit: netlink socket can be auto-bound to pid other than current->pid (v2)
  [NET]: Fix permissions of /proc/net
  [SCTP]: Fix a race between module load and protosw access
  [NETFILTER]: ipt_recent: sanity check hit count
  [NETFILTER]: nf_conntrack_h323: logical-bitwise & confusion in process_setup()
  [RT2X00] drivers/net/wireless/rt2x00/rt2x00dev.c: remove dead code, fix warning
  [IPV4]: esp_output() misannotations
  [8021Q]: vlan_dev misannotations
  xfrm: ->eth_proto is __be16
  [IPV4]: ipv4_is_lbcast() misannotations
  [SUNRPC]: net/* NULL noise
  [SCTP]: fix misannotated __sctp_rcv_asconf_lookup()
  [PKT_SCHED]: annotate cls_u32
  ...
commit 7d3628b230
Linus Torvalds, 2008-03-21 07:57:45 -07:00
57 changed files with 653 additions and 393 deletions

View file

@ -880,7 +880,7 @@ P: Marcel Holtmann
M: marcel@holtmann.org
P: Maxim Krasnyansky
M: maxk@qualcomm.com
L: bluez-devel@lists.sf.net
L: linux-bluetooth@vger.kernel.org
W: http://bluez.sf.net
W: http://www.bluez.org
W: http://www.holtmann.org/linux/bluetooth/

View file

@ -659,7 +659,7 @@ sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
socket = SOCKET_I(inode);
local.sin_family = AF_INET;
local.sin_addr.s_addr = INADDR_ANY;
local.sin_addr.s_addr = htonl(INADDR_ANY);
/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
try_port = 1024;

View file

@ -618,7 +618,7 @@ sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
socket = SOCKET_I(inode);
local.sin_family = AF_INET;
local.sin_addr.s_addr = INADDR_ANY;
local.sin_addr.s_addr = htonl(INADDR_ANY);
/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
try_port = 1024;

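INADDR_ANY is all-zero bits, so htonl() leaves the value unchanged in both hunks above; the point of the change is that sin_addr.s_addr is a network-byte-order (__be32) field and the assignment should say so. The same idiom in ordinary userspace socket code, as a self-contained illustration (bind_any() is a made-up helper, not part of the patch):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Bind a socket to any local IPv4 address on port 1024, keeping every
 * multi-byte sockaddr_in field in network byte order. */
static int bind_any(int fd)
{
	struct sockaddr_in local;

	memset(&local, 0, sizeof(local));
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = htonl(INADDR_ANY);	/* no-op for zero, but type-correct */
	local.sin_port = htons(1024);			/* IPPORT_RESERVED */

	return bind(fd, (struct sockaddr *)&local, sizeof(local));
}
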
View file

@ -1988,19 +1988,19 @@ fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
if (fore200e_getstats(fore200e) < 0)
return -EIO;
tmp.section_bip = cpu_to_be32(fore200e->stats->oc3.section_bip8_errors);
tmp.line_bip = cpu_to_be32(fore200e->stats->oc3.line_bip24_errors);
tmp.path_bip = cpu_to_be32(fore200e->stats->oc3.path_bip8_errors);
tmp.line_febe = cpu_to_be32(fore200e->stats->oc3.line_febe_errors);
tmp.path_febe = cpu_to_be32(fore200e->stats->oc3.path_febe_errors);
tmp.corr_hcs = cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors);
tmp.uncorr_hcs = cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors);
tmp.tx_cells = cpu_to_be32(fore200e->stats->aal0.cells_transmitted) +
cpu_to_be32(fore200e->stats->aal34.cells_transmitted) +
cpu_to_be32(fore200e->stats->aal5.cells_transmitted);
tmp.rx_cells = cpu_to_be32(fore200e->stats->aal0.cells_received) +
cpu_to_be32(fore200e->stats->aal34.cells_received) +
cpu_to_be32(fore200e->stats->aal5.cells_received);
tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
be32_to_cpu(fore200e->stats->aal34.cells_received) +
be32_to_cpu(fore200e->stats->aal5.cells_received);
if (arg)
return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
@ -2587,7 +2587,7 @@ fore200e_start_fw(struct fore200e* fore200e)
static int __devinit
fore200e_load_fw(struct fore200e* fore200e)
{
u32* fw_data = (u32*) fore200e->bus->fw_data;
__le32* fw_data = (__le32*) fore200e->bus->fw_data;
u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
struct fw_header* fw_header = (struct fw_header*) fw_data;
@ -2965,8 +2965,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" 4b5b:\n"
" crc_header_errors:\t\t%10u\n"
" framing_errors:\t\t%10u\n",
cpu_to_be32(fore200e->stats->phy.crc_header_errors),
cpu_to_be32(fore200e->stats->phy.framing_errors));
be32_to_cpu(fore200e->stats->phy.crc_header_errors),
be32_to_cpu(fore200e->stats->phy.framing_errors));
if (!left--)
return sprintf(page, "\n"
@ -2978,13 +2978,13 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" path_febe_errors:\t\t%10u\n"
" corr_hcs_errors:\t\t%10u\n"
" ucorr_hcs_errors:\t\t%10u\n",
cpu_to_be32(fore200e->stats->oc3.section_bip8_errors),
cpu_to_be32(fore200e->stats->oc3.path_bip8_errors),
cpu_to_be32(fore200e->stats->oc3.line_bip24_errors),
cpu_to_be32(fore200e->stats->oc3.line_febe_errors),
cpu_to_be32(fore200e->stats->oc3.path_febe_errors),
cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors),
cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors));
be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
if (!left--)
return sprintf(page,"\n"
@ -2995,12 +2995,12 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" vpi no conn:\t\t%10u\n"
" vci out of range:\t\t%10u\n"
" vci no conn:\t\t%10u\n",
cpu_to_be32(fore200e->stats->atm.cells_transmitted),
cpu_to_be32(fore200e->stats->atm.cells_received),
cpu_to_be32(fore200e->stats->atm.vpi_bad_range),
cpu_to_be32(fore200e->stats->atm.vpi_no_conn),
cpu_to_be32(fore200e->stats->atm.vci_bad_range),
cpu_to_be32(fore200e->stats->atm.vci_no_conn));
be32_to_cpu(fore200e->stats->atm.cells_transmitted),
be32_to_cpu(fore200e->stats->atm.cells_received),
be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
be32_to_cpu(fore200e->stats->atm.vci_bad_range),
be32_to_cpu(fore200e->stats->atm.vci_no_conn));
if (!left--)
return sprintf(page,"\n"
@ -3008,9 +3008,9 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" TX:\t\t\t%10u\n"
" RX:\t\t\t%10u\n"
" dropped:\t\t\t%10u\n",
cpu_to_be32(fore200e->stats->aal0.cells_transmitted),
cpu_to_be32(fore200e->stats->aal0.cells_received),
cpu_to_be32(fore200e->stats->aal0.cells_dropped));
be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
be32_to_cpu(fore200e->stats->aal0.cells_received),
be32_to_cpu(fore200e->stats->aal0.cells_dropped));
if (!left--)
return sprintf(page,"\n"
@ -3026,15 +3026,15 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" RX:\t\t\t%10u\n"
" dropped:\t\t\t%10u\n"
" protocol errors:\t\t%10u\n",
cpu_to_be32(fore200e->stats->aal34.cells_transmitted),
cpu_to_be32(fore200e->stats->aal34.cells_received),
cpu_to_be32(fore200e->stats->aal34.cells_dropped),
cpu_to_be32(fore200e->stats->aal34.cells_crc_errors),
cpu_to_be32(fore200e->stats->aal34.cells_protocol_errors),
cpu_to_be32(fore200e->stats->aal34.cspdus_transmitted),
cpu_to_be32(fore200e->stats->aal34.cspdus_received),
cpu_to_be32(fore200e->stats->aal34.cspdus_dropped),
cpu_to_be32(fore200e->stats->aal34.cspdus_protocol_errors));
be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
be32_to_cpu(fore200e->stats->aal34.cells_received),
be32_to_cpu(fore200e->stats->aal34.cells_dropped),
be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
be32_to_cpu(fore200e->stats->aal34.cspdus_received),
be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
if (!left--)
return sprintf(page,"\n"
@ -3050,15 +3050,15 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" dropped:\t\t\t%10u\n"
" CRC errors:\t\t%10u\n"
" protocol errors:\t\t%10u\n",
cpu_to_be32(fore200e->stats->aal5.cells_transmitted),
cpu_to_be32(fore200e->stats->aal5.cells_received),
cpu_to_be32(fore200e->stats->aal5.cells_dropped),
cpu_to_be32(fore200e->stats->aal5.congestion_experienced),
cpu_to_be32(fore200e->stats->aal5.cspdus_transmitted),
cpu_to_be32(fore200e->stats->aal5.cspdus_received),
cpu_to_be32(fore200e->stats->aal5.cspdus_dropped),
cpu_to_be32(fore200e->stats->aal5.cspdus_crc_errors),
cpu_to_be32(fore200e->stats->aal5.cspdus_protocol_errors));
be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
be32_to_cpu(fore200e->stats->aal5.cells_received),
be32_to_cpu(fore200e->stats->aal5.cells_dropped),
be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
be32_to_cpu(fore200e->stats->aal5.cspdus_received),
be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
if (!left--)
return sprintf(page,"\n"
@ -3069,11 +3069,11 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" large b2:\t\t\t%10u\n"
" RX PDUs:\t\t\t%10u\n"
" TX PDUs:\t\t\t%10lu\n",
cpu_to_be32(fore200e->stats->aux.small_b1_failed),
cpu_to_be32(fore200e->stats->aux.large_b1_failed),
cpu_to_be32(fore200e->stats->aux.small_b2_failed),
cpu_to_be32(fore200e->stats->aux.large_b2_failed),
cpu_to_be32(fore200e->stats->aux.rpd_alloc_failed),
be32_to_cpu(fore200e->stats->aux.small_b1_failed),
be32_to_cpu(fore200e->stats->aux.large_b1_failed),
be32_to_cpu(fore200e->stats->aux.small_b2_failed),
be32_to_cpu(fore200e->stats->aux.large_b2_failed),
be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
fore200e->tx_sat);
if (!left--)

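The fore200e keeps its statistics in board memory in the i960's big-endian layout, so the host-side reads above want be32_to_cpu(); on a little-endian machine cpu_to_be32() happens to perform the same byte swap, so the change matches the __be32 annotations introduced in fore200e.h rather than altering behaviour. A userspace sketch of the direction of conversion, using the glibc be32toh()/htobe32() equivalents of the kernel helpers and an invented counter value:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend the adapter reported 5 section BIP-8 errors; the field is
	 * stored big-endian no matter what the host's byte order is. */
	uint32_t raw = htobe32(5);		/* what the hardware hands us */

	/* Reading device data: big-endian -> host order. */
	printf("section_bip8_errors = %u\n", (unsigned int)be32toh(raw));

	/* On little-endian hosts be32toh() and htobe32() are both plain byte
	 * swaps, which is why the old cpu_to_be32() calls produced the right
	 * numbers; the conversion simply read the wrong way around. */
	return 0;
}
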
View file

@ -349,90 +349,90 @@ typedef struct oc3_block {
/* physical encoding statistics */
typedef struct stats_phy {
u32 crc_header_errors; /* cells received with bad header CRC */
u32 framing_errors; /* cells received with bad framing */
u32 pad[ 2 ]; /* i960 padding */
__be32 crc_header_errors; /* cells received with bad header CRC */
__be32 framing_errors; /* cells received with bad framing */
__be32 pad[ 2 ]; /* i960 padding */
} stats_phy_t;
/* OC-3 statistics */
typedef struct stats_oc3 {
u32 section_bip8_errors; /* section 8 bit interleaved parity */
u32 path_bip8_errors; /* path 8 bit interleaved parity */
u32 line_bip24_errors; /* line 24 bit interleaved parity */
u32 line_febe_errors; /* line far end block errors */
u32 path_febe_errors; /* path far end block errors */
u32 corr_hcs_errors; /* correctable header check sequence */
u32 ucorr_hcs_errors; /* uncorrectable header check sequence */
u32 pad[ 1 ]; /* i960 padding */
__be32 section_bip8_errors; /* section 8 bit interleaved parity */
__be32 path_bip8_errors; /* path 8 bit interleaved parity */
__be32 line_bip24_errors; /* line 24 bit interleaved parity */
__be32 line_febe_errors; /* line far end block errors */
__be32 path_febe_errors; /* path far end block errors */
__be32 corr_hcs_errors; /* correctable header check sequence */
__be32 ucorr_hcs_errors; /* uncorrectable header check sequence */
__be32 pad[ 1 ]; /* i960 padding */
} stats_oc3_t;
/* ATM statistics */
typedef struct stats_atm {
u32 cells_transmitted; /* cells transmitted */
u32 cells_received; /* cells received */
u32 vpi_bad_range; /* cell drops: VPI out of range */
u32 vpi_no_conn; /* cell drops: no connection for VPI */
u32 vci_bad_range; /* cell drops: VCI out of range */
u32 vci_no_conn; /* cell drops: no connection for VCI */
u32 pad[ 2 ]; /* i960 padding */
__be32 cells_transmitted; /* cells transmitted */
__be32 cells_received; /* cells received */
__be32 vpi_bad_range; /* cell drops: VPI out of range */
__be32 vpi_no_conn; /* cell drops: no connection for VPI */
__be32 vci_bad_range; /* cell drops: VCI out of range */
__be32 vci_no_conn; /* cell drops: no connection for VCI */
__be32 pad[ 2 ]; /* i960 padding */
} stats_atm_t;
/* AAL0 statistics */
typedef struct stats_aal0 {
u32 cells_transmitted; /* cells transmitted */
u32 cells_received; /* cells received */
u32 cells_dropped; /* cells dropped */
u32 pad[ 1 ]; /* i960 padding */
__be32 cells_transmitted; /* cells transmitted */
__be32 cells_received; /* cells received */
__be32 cells_dropped; /* cells dropped */
__be32 pad[ 1 ]; /* i960 padding */
} stats_aal0_t;
/* AAL3/4 statistics */
typedef struct stats_aal34 {
u32 cells_transmitted; /* cells transmitted from segmented PDUs */
u32 cells_received; /* cells reassembled into PDUs */
u32 cells_crc_errors; /* payload CRC error count */
u32 cells_protocol_errors; /* SAR or CS layer protocol errors */
u32 cells_dropped; /* cells dropped: partial reassembly */
u32 cspdus_transmitted; /* CS PDUs transmitted */
u32 cspdus_received; /* CS PDUs received */
u32 cspdus_protocol_errors; /* CS layer protocol errors */
u32 cspdus_dropped; /* reassembled PDUs drop'd (in cells) */
u32 pad[ 3 ]; /* i960 padding */
__be32 cells_transmitted; /* cells transmitted from segmented PDUs */
__be32 cells_received; /* cells reassembled into PDUs */
__be32 cells_crc_errors; /* payload CRC error count */
__be32 cells_protocol_errors; /* SAR or CS layer protocol errors */
__be32 cells_dropped; /* cells dropped: partial reassembly */
__be32 cspdus_transmitted; /* CS PDUs transmitted */
__be32 cspdus_received; /* CS PDUs received */
__be32 cspdus_protocol_errors; /* CS layer protocol errors */
__be32 cspdus_dropped; /* reassembled PDUs drop'd (in cells) */
__be32 pad[ 3 ]; /* i960 padding */
} stats_aal34_t;
/* AAL5 statistics */
typedef struct stats_aal5 {
u32 cells_transmitted; /* cells transmitted from segmented SDUs */
u32 cells_received; /* cells reassembled into SDUs */
u32 cells_dropped; /* reassembled PDUs dropped (in cells) */
u32 congestion_experienced; /* CRC error and length wrong */
u32 cspdus_transmitted; /* CS PDUs transmitted */
u32 cspdus_received; /* CS PDUs received */
u32 cspdus_crc_errors; /* CS PDUs CRC errors */
u32 cspdus_protocol_errors; /* CS layer protocol errors */
u32 cspdus_dropped; /* reassembled PDUs dropped */
u32 pad[ 3 ]; /* i960 padding */
__be32 cells_transmitted; /* cells transmitted from segmented SDUs */
__be32 cells_received; /* cells reassembled into SDUs */
__be32 cells_dropped; /* reassembled PDUs dropped (in cells) */
__be32 congestion_experienced; /* CRC error and length wrong */
__be32 cspdus_transmitted; /* CS PDUs transmitted */
__be32 cspdus_received; /* CS PDUs received */
__be32 cspdus_crc_errors; /* CS PDUs CRC errors */
__be32 cspdus_protocol_errors; /* CS layer protocol errors */
__be32 cspdus_dropped; /* reassembled PDUs dropped */
__be32 pad[ 3 ]; /* i960 padding */
} stats_aal5_t;
/* auxiliary statistics */
typedef struct stats_aux {
u32 small_b1_failed; /* receive BD allocation failures */
u32 large_b1_failed; /* receive BD allocation failures */
u32 small_b2_failed; /* receive BD allocation failures */
u32 large_b2_failed; /* receive BD allocation failures */
u32 rpd_alloc_failed; /* receive PDU allocation failures */
u32 receive_carrier; /* no carrier = 0, carrier = 1 */
u32 pad[ 2 ]; /* i960 padding */
__be32 small_b1_failed; /* receive BD allocation failures */
__be32 large_b1_failed; /* receive BD allocation failures */
__be32 small_b2_failed; /* receive BD allocation failures */
__be32 large_b2_failed; /* receive BD allocation failures */
__be32 rpd_alloc_failed; /* receive PDU allocation failures */
__be32 receive_carrier; /* no carrier = 0, carrier = 1 */
__be32 pad[ 2 ]; /* i960 padding */
} stats_aux_t;
@ -643,10 +643,10 @@ typedef struct host_bsq {
/* header of the firmware image */
typedef struct fw_header {
u32 magic; /* magic number */
u32 version; /* firmware version id */
u32 load_offset; /* fw load offset in board memory */
u32 start_offset; /* fw execution start address in board memory */
__le32 magic; /* magic number */
__le32 version; /* firmware version id */
__le32 load_offset; /* fw load offset in board memory */
__le32 start_offset; /* fw execution start address in board memory */
} fw_header_t;
#define FW_HEADER_MAGIC 0x65726f66 /* 'fore' */

View file

@ -485,9 +485,7 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: burped during tx load.\n",
dev->name);
spin_lock_irqsave(&lp->lock, flags);
}
while (1);
} while (1);
}
/**
@ -612,7 +610,8 @@ static irqreturn_t el_interrupt(int irq, void *dev_id)
dev->stats.tx_packets++;
if (el_debug > 6)
printk(KERN_DEBUG " Tx succeeded %s\n",
(txsr & TX_RDY) ? "." : "but tx is busy!");
(txsr & TX_RDY) ? "." :
"but tx is busy!");
/*
* This is safe the interrupt is atomic WRT itself.
*/
@ -693,7 +692,8 @@ static void el_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1536) {
if (el_debug)
printk(KERN_DEBUG "%s: bogus packet, length=%d\n", dev->name, pkt_len);
printk(KERN_DEBUG "%s: bogus packet, length=%d\n",
dev->name, pkt_len);
dev->stats.rx_over_errors++;
return;
}
@ -711,7 +711,8 @@ static void el_receive(struct net_device *dev)
outw(0x00, GP_LOW);
if (skb == NULL) {
printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n", dev->name);
printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n",
dev->name);
dev->stats.rx_dropped++;
return;
} else {
@ -748,7 +749,8 @@ static void el_reset(struct net_device *dev)
if (el_debug > 2)
printk(KERN_INFO "3c501 reset...");
outb(AX_RESET, AX_CMD); /* Reset the chip */
outb(AX_LOOP, AX_CMD); /* Aux control, irq and loopback enabled */
/* Aux control, irq and loopback enabled */
outb(AX_LOOP, AX_CMD);
{
int i;
for (i = 0; i < 6; i++) /* Set the station address. */

View file

@ -1765,15 +1765,12 @@ static irqreturn_t atl1_intr(int irq, void *data)
{
struct atl1_adapter *adapter = netdev_priv(data);
u32 status;
u8 update_rx;
int max_ints = 10;
status = adapter->cmb.cmb->int_stats;
if (!status)
return IRQ_NONE;
update_rx = 0;
do {
/* clear CMB interrupt status at once */
adapter->cmb.cmb->int_stats = 0;

View file

@ -1107,9 +1107,15 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres))
if (USE_GTS || !should_restart_tx(q))
t3_stop_queue(dev, qs, q);
if (unlikely(credits - ndesc < q->stop_thres)) {
t3_stop_queue(dev, qs, q);
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
q->restarts++;
netif_wake_queue(dev);
}
}
gen = q->gen;
q->unacked += ndesc;

View file

@ -131,8 +131,8 @@ IIIa. Ring buffers
IVb. References
http://www.smsc.com/main/datasheets/83c171.pdf
http://www.smsc.com/main/datasheets/83c175.pdf
http://www.smsc.com/main/tools/discontinued/83c171.pdf
http://www.smsc.com/main/tools/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html
@ -227,7 +227,12 @@ static const u16 media2miictl[16] = {
0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 };
/* The EPIC100 Rx and Tx buffer descriptors. */
/*
* The EPIC100 Rx and Tx buffer descriptors. Note that these
* really ARE host-endian; it's not a misannotation. We tell
* the card to byteswap them internally on big-endian hosts -
* look for #ifdef CONFIG_BIG_ENDIAN in epic_open().
*/
struct epic_tx_desc {
u32 txstatus;
@ -418,7 +423,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
/* Note: the '175 does not have a serial EEPROM. */
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
if (debug > 2) {
dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
@ -682,7 +687,8 @@ static int epic_open(struct net_device *dev)
if (ep->chip_flags & MII_PWRDWN)
outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef CONFIG_BIG_ENDIAN
outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
inl(ioaddr + GENCTL);
outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@ -695,7 +701,7 @@ static int epic_open(struct net_device *dev)
udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
for (i = 0; i < 3; i++)
outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
ep->tx_threshold = TX_FIFO_THRESH;
outl(ep->tx_threshold, ioaddr + TxThresh);
@ -798,7 +804,7 @@ static void epic_restart(struct net_device *dev)
for (i = 16; i > 0; i--)
outl(0x0008, ioaddr + TEST1);
#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
#ifdef CONFIG_BIG_ENDIAN
outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@ -808,7 +814,7 @@ static void epic_restart(struct net_device *dev)
outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
for (i = 0; i < 3; i++)
outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
ep->tx_threshold = TX_FIFO_THRESH;
outl(ep->tx_threshold, ioaddr + TxThresh);
@ -919,7 +925,7 @@ static void epic_init_ring(struct net_device *dev)
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
ep->rx_ring[i].rxstatus = 0;
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
ep->rx_ring[i].buflength = ep->rx_buf_sz;
ep->rx_ring[i].next = ep->rx_ring_dma +
(i+1)*sizeof(struct epic_rx_desc);
ep->rx_skbuff[i] = NULL;
@ -936,7 +942,7 @@ static void epic_init_ring(struct net_device *dev)
skb_reserve(skb, 2); /* 16 byte align the IP header. */
ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
ep->rx_ring[i].rxstatus = DescOwn;
}
ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@ -974,20 +980,20 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
ctrl_word = 0x100000; /* No interrupt */
} else if (free_count == TX_QUEUE_LEN/2) {
ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
ctrl_word = 0x140000; /* Tx-done intr. */
} else if (free_count < TX_QUEUE_LEN - 1) {
ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
ctrl_word = 0x100000; /* No Tx-done intr. */
} else {
/* Leave room for an additional entry. */
ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
ctrl_word = 0x140000; /* Tx-done intr. */
ep->tx_full = 1;
}
ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
ep->tx_ring[entry].buflength = ctrl_word | skb->len;
ep->tx_ring[entry].txstatus =
((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
| cpu_to_le32(DescOwn);
| DescOwn;
ep->cur_tx++;
if (ep->tx_full)
@ -1041,7 +1047,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
struct sk_buff *skb;
int entry = dirty_tx % TX_RING_SIZE;
int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
int txstatus = ep->tx_ring[entry].txstatus;
if (txstatus & DescOwn)
break; /* It still hasn't been Txed */
@ -1163,8 +1169,8 @@ static int epic_rx(struct net_device *dev, int budget)
rx_work_limit = budget;
/* If we own the next entry, it's a new packet. Send it up. */
while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
int status = ep->rx_ring[entry].rxstatus;
if (debug > 4)
printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
@ -1238,7 +1244,8 @@ static int epic_rx(struct net_device *dev, int budget)
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
work_done++;
}
ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/* AV: shouldn't we add a barrier here? */
ep->rx_ring[entry].rxstatus = DescOwn;
}
return work_done;
}

View file

@ -184,6 +184,7 @@
#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */
enum {
NvRegIrqStatus = 0x000,
@ -635,6 +636,8 @@ union ring_type {
#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2
#define NV_TX_LIMIT_COUNT 16
/* statistics */
struct nv_ethtool_str {
char name[ETH_GSTRING_LEN];
@ -743,6 +746,8 @@ struct nv_skb_map {
struct sk_buff *skb;
dma_addr_t dma;
unsigned int dma_len;
struct ring_desc_ex *first_tx_desc;
struct nv_skb_map *next_tx_ctx;
};
/*
@ -827,6 +832,10 @@ struct fe_priv {
union ring_type tx_ring;
u32 tx_flags;
int tx_ring_size;
int tx_limit;
u32 tx_pkts_in_progress;
struct nv_skb_map *tx_change_owner;
struct nv_skb_map *tx_end_flip;
int tx_stop;
/* vlan fields */
@ -1707,6 +1716,9 @@ static void nv_init_tx(struct net_device *dev)
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
@ -1720,6 +1732,9 @@ static void nv_init_tx(struct net_device *dev)
}
np->tx_skb[i].skb = NULL;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
}
@ -1771,7 +1786,14 @@ static void nv_drain_tx(struct net_device *dev)
}
if (nv_release_txskb(dev, &np->tx_skb[i]))
dev->stats.tx_dropped++;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
@ -1948,6 +1970,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
struct ring_desc_ex* start_tx;
struct ring_desc_ex* prev_tx;
struct nv_skb_map* prev_tx_ctx;
struct nv_skb_map* start_tx_ctx;
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
@ -1965,6 +1988,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
}
start_tx = put_tx = np->put_tx.ex;
start_tx_ctx = np->put_tx_ctx;
/* setup the header buffer */
do {
@ -2037,6 +2061,26 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&np->lock);
if (np->tx_limit) {
/* Limit the number of outstanding tx. Setup all fragments, but
* do not set the VALID bit on the first descriptor. Save a pointer
* to that descriptor and also for next skb_map element.
*/
if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
if (!np->tx_change_owner)
np->tx_change_owner = start_tx_ctx;
/* remove VALID bit */
tx_flags &= ~NV_TX2_VALID;
start_tx_ctx->first_tx_desc = start_tx;
start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
np->tx_end_flip = np->put_tx_ctx;
} else {
np->tx_pkts_in_progress++;
}
}
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
np->put_tx.ex = put_tx;
@ -2060,6 +2104,25 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
np->tx_pkts_in_progress--;
if (np->tx_change_owner) {
__le32 flaglen = le32_to_cpu(np->tx_change_owner->first_tx_desc->flaglen);
flaglen |= NV_TX2_VALID;
np->tx_change_owner->first_tx_desc->flaglen = cpu_to_le32(flaglen);
np->tx_pkts_in_progress++;
np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
if (np->tx_change_owner == np->tx_end_flip)
np->tx_change_owner = NULL;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
}
}
/*
* nv_tx_done: check for completed packets, release the skbs.
*
@ -2147,6 +2210,10 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
dev->stats.tx_packets++;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
if (np->tx_limit) {
nv_tx_flip_ownership(dev);
}
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
@ -5333,6 +5400,21 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->need_linktimer = 0;
}
/* Limit the number of tx's outstanding for hw bug */
if (id->driver_data & DEV_NEED_TX_LIMIT) {
np->tx_limit = 1;
if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
pci_dev->revision >= 0xA2)
np->tx_limit = 0;
}
/* clear phy state and temporarily halt phy interrupts */
writel(0, base + NvRegMIIMask);
phystate = readl(base + NvRegAdapterControl);
@ -5563,19 +5645,19 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
@ -5587,11 +5669,11 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
@ -5611,19 +5693,19 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_NEED_TX_LIMIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
@ -5659,35 +5741,35 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT,
},
{0,},
};

View file

@ -143,6 +143,10 @@ static inline void emac_report_timeout_error(struct emac_instance *dev,
#define STOP_TIMEOUT_1000 13
#define STOP_TIMEOUT_1000_JUMBO 73
static unsigned char default_mcast_addr[] = {
0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
@ -618,6 +622,9 @@ static int emac_configure(struct emac_instance *dev)
if (emac_phy_gpcs(dev->phy.mode))
emac_mii_reset_phy(&dev->phy);
/* Required for Pause packet support in EMAC */
dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
return 0;
}

View file

@ -154,6 +154,10 @@ static int __devexit tah_remove(struct of_device *ofdev)
static struct of_device_id tah_match[] =
{
{
.compatible = "ibm,tah",
},
/* For backward compat with old DT */
{
.type = "tah",
},

View file

@ -35,6 +35,7 @@
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <linux/lockdep.h>
#define TX_TIMEOUT (2*HZ)
@ -227,6 +228,16 @@ static struct rtnl_link_ops ifb_link_ops __read_mostly = {
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
/*
* dev_ifb->queue_lock is usually taken after dev->ingress_lock,
* reversely to e.g. qdisc_lock_tree(). It should be safe until
* ifb doesn't take dev->queue_lock with dev_ifb->ingress_lock.
* But lockdep should know that ifb has different locks from dev.
*/
static struct lock_class_key ifb_queue_lock_key;
static struct lock_class_key ifb_ingress_lock_key;
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
@ -246,6 +257,10 @@ static int __init ifb_init_one(int index)
err = register_netdevice(dev_ifb);
if (err < 0)
goto err;
lockdep_set_class(&dev_ifb->queue_lock, &ifb_queue_lock_key);
lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
return 0;
err:

View file

@ -31,7 +31,6 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
@ -2484,10 +2483,24 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->protocol == htons(ETH_P_IP))
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (skb->sk && (skb->sk->sk_protocol == IPPROTO_TCP))
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
case __constant_htons(ETH_P_IPV6):
/* XXX what about other V6 headers?? */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
default:
if (unlikely(net_ratelimit()))
dev_warn(&adapter->pdev->dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
}
}
context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

View file

@ -1221,7 +1221,8 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
}
#endif
static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __devinit ioc3_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int sw_physid1, sw_physid2;
struct net_device *dev = NULL;

View file

@ -472,7 +472,6 @@ static int ipg_config_autoneg(struct net_device *dev)
unsigned int txflowcontrol;
unsigned int rxflowcontrol;
unsigned int fullduplex;
unsigned int gig;
u32 mac_ctrl_val;
u32 asicctrl;
u8 phyctrl;
@ -489,7 +488,6 @@ static int ipg_config_autoneg(struct net_device *dev)
fullduplex = 0;
txflowcontrol = 0;
rxflowcontrol = 0;
gig = 0;
/* To accomodate a problem in 10Mbps operation,
* set a global flag if PHY running in 10Mbps mode.
@ -511,7 +509,6 @@ static int ipg_config_autoneg(struct net_device *dev)
break;
case IPG_PC_LINK_SPEED_1000MBPS:
printk("1000Mbps.\n");
gig = 1;
break;
default:
printk("undefined!\n");
@ -1900,8 +1897,13 @@ static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Specify the TFC field within the TFD. */
txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
(IPG_TFC_FRAMEID & cpu_to_le64(sp->tx_current)) |
(IPG_TFC_FRAMEID & sp->tx_current) |
(IPG_TFC_FRAGCOUNT & (1 << 24)));
/*
* 16--17 (WordAlign) <- 3 (disable),
* 0--15 (FrameId) <- sp->tx_current,
* 24--27 (FragCount) <- 1
*/
/* Request TxComplete interrupts at an interval defined
* by the constant IPG_FRAMESBETWEENTXCOMPLETES.

View file

@ -535,9 +535,9 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
if (count & 3) {
buf += count & ~3;
if (count & 2) {
u16 *b = (u16 *)buf;
__le16 *b = (__le16 *)buf;
*b++ = le16_to_cpu(inw(NE_BASE + NE_DATAPORT));
*b++ = cpu_to_le16(inw(NE_BASE + NE_DATAPORT));
buf = (char *)b;
}
if (count & 1)
@ -600,9 +600,9 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
if (count & 3) {
buf += count & ~3;
if (count & 2) {
u16 *b = (u16 *)buf;
__le16 *b = (__le16 *)buf;
outw(cpu_to_le16(*b++), NE_BASE + NE_DATAPORT);
outw(le16_to_cpu(*b++), NE_BASE + NE_DATAPORT);
buf = (char *)b;
}
}

View file

@ -1644,13 +1644,24 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
}
/* put them in the newtork_list */
scan_info = wl->buf;
scan_info_size = 0;
i = 0;
while (scan_info_size < data_len) {
for (i = 0, scan_info_size = 0, scan_info = wl->buf;
scan_info_size < data_len;
i++, scan_info_size += be16_to_cpu(scan_info->size),
scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) {
pr_debug("%s:size=%d bssid=%s scan_info=%p\n", __func__,
be16_to_cpu(scan_info->size),
print_mac(mac, &scan_info->bssid[2]), scan_info);
/*
* The wireless firmware may return invalid channel 0 and/or
* invalid rate if the AP emits zero length SSID ie. As this
* scan information is useless, ignore it
*/
if (!be16_to_cpu(scan_info->channel) || !scan_info->rate[0]) {
pr_debug("%s: invalid scan info\n", __func__);
continue;
}
found = 0;
oldest = NULL;
list_for_each_entry(target, &wl->network_list, list) {
@ -1687,10 +1698,6 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
GFP_KERNEL);
if (!target->hwinfo) {
pr_info("%s: kzalloc failed\n", __func__);
i++;
scan_info_size += be16_to_cpu(scan_info->size);
scan_info = (void *)scan_info +
be16_to_cpu(scan_info->size);
continue;
}
/* copy hw scan info */
@ -1709,10 +1716,6 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
if (scan_info->ext_rate[r])
target->rate_ext_len++;
list_move_tail(&target->list, &wl->network_list);
/* bump pointer */
i++;
scan_info_size += be16_to_cpu(scan_info->size);
scan_info = (void *)scan_info + be16_to_cpu(scan_info->size);
}
memset(&data, 0, sizeof(data));
wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data,
@ -2389,6 +2392,8 @@ static struct net_device *gelic_wl_alloc(struct gelic_card *card)
if (!netdev)
return NULL;
strcpy(netdev->name, "wlan%d");
port = netdev_priv(netdev);
port->netdev = netdev;
port->card = card;

View file

@ -239,7 +239,8 @@ static void r6040_free_txbufs(struct net_device *dev)
for (i = 0; i < TX_DCNT; i++) {
if (lp->tx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
pci_unmap_single(lp->pdev,
le32_to_cpu(lp->tx_insert_ptr->buf),
MAX_BUF_SIZE, PCI_DMA_TODEVICE);
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
@ -255,7 +256,8 @@ static void r6040_free_rxbufs(struct net_device *dev)
for (i = 0; i < RX_DCNT; i++) {
if (lp->rx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
pci_unmap_single(lp->pdev,
le32_to_cpu(lp->rx_insert_ptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
@ -542,7 +544,7 @@ static int r6040_rx(struct net_device *dev, int limit)
skb_ptr->dev = priv->dev;
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - 4);
pci_unmap_single(priv->pdev, descptr->buf,
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
/* Send to upper layer */
@ -585,7 +587,7 @@ static void r6040_tx(struct net_device *dev)
if (descptr->status & 0x8000)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
pci_unmap_single(priv->pdev, descptr->buf,
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
skb_ptr->len, PCI_DMA_TODEVICE);
/* Free buffer */
dev_kfree_skb_irq(skb_ptr);

View file

@ -64,8 +64,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.87"
#define DRV_MODULE_RELDATE "December 20, 2007"
#define DRV_MODULE_VERSION "3.88"
#define DRV_MODULE_RELDATE "March 20, 2008"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@ -11841,7 +11841,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
}
if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
#ifdef CONFIG_SPARC
if (!tg3_get_default_macaddr_sparc(tp))
return 0;
#endif

View file

@ -842,7 +842,7 @@ static inline int de_is_running (struct de_private *de)
static void de_stop_rxtx (struct de_private *de)
{
u32 macmode;
unsigned int work = 1000;
unsigned int i = 1300/100;
macmode = dr32(MacMode);
if (macmode & RxTx) {
@ -850,10 +850,14 @@ static void de_stop_rxtx (struct de_private *de)
dr32(MacMode);
}
while (--work > 0) {
/* wait until in-flight frame completes.
* Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
* Typically expect this loop to end in < 50 us on 100BT.
*/
while (--i) {
if (!de_is_running(de))
return;
cpu_relax();
udelay(100);
}
printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);

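Worked out, the new loop bound in the hunk above follows directly from the comment: a maximum-length frame at 10BASE-T takes 1500 bytes x 8 bits / 10 Mbit/s = 1200 us on the wire, plus the 100 us margin gives 1300 us, and polling every 100 us with udelay(100) therefore needs 1300 / 100 = 13 iterations, the initial value of i.
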
View file

@ -154,8 +154,8 @@ static struct ucc_geth_info ugeth_primary_info = {
.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
.numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
.numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
@ -3975,6 +3975,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
}
/* Set the bus id */

View file

@ -16,10 +16,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@ -318,6 +314,14 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
net->hard_header_len += sizeof (struct rndis_data_hdr);
dev->hard_mtu = net->mtu + net->hard_header_len;
dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
if (dev->maxpacket == 0) {
if (netif_msg_probe(dev))
dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n");
retval = -EINVAL;
goto fail_and_release;
}
dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
dev->rx_urb_size &= ~(dev->maxpacket - 1);
u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);

View file

@ -2024,6 +2024,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct fstioc_write wrthdr;
struct fstioc_info info;
unsigned long flags;
void *buf;
dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
@ -2065,16 +2066,22 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ENXIO;
}
/* Now copy the data to the card.
* This will probably break on some architectures.
* I'll fix it when I have something to test on.
*/
if (copy_from_user(card->mem + wrthdr.offset,
/* Now copy the data to the card. */
buf = kmalloc(wrthdr.size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf,
ifr->ifr_data + sizeof (struct fstioc_write),
wrthdr.size)) {
kfree(buf);
return -EFAULT;
}
memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
kfree(buf);
/* Writes to the memory of a card in the reset state constitute
* a download
*/

View file

@ -427,6 +427,8 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
if (ah->ah_rf_banks != NULL)
kfree(ah->ah_rf_banks);

View file

@ -860,7 +860,7 @@ static void b43_phy_ww(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_OFDM(0xBB),
(b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
b43_phy_write(dev, B43_PHY_OFDM61,
(b43_phy_read(dev, B43_PHY_OFDM61 & 0xFE1F)) | 0x0120);
(b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
b43_phy_write(dev, B43_PHY_OFDM(0x13),
(b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
b43_phy_write(dev, B43_PHY_OFDM(0x14),

View file

@ -63,6 +63,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */
{USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
{USB_DEVICE(0x124a, 0x4025)}, /* IOGear GWU513 (GW3887IK chip) */
{USB_DEVICE(0x13b1, 0x000a)}, /* Linksys WUSB54G ver 2 */
{USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */

View file

@ -620,6 +620,9 @@ struct rt2x00_dev {
* This will only be compiled in when required.
*/
#ifdef CONFIG_RT2X00_LIB_RFKILL
unsigned long rfkill_state;
#define RFKILL_STATE_ALLOCATED 1
#define RFKILL_STATE_REGISTERED 2
struct rfkill *rfkill;
struct input_polled_dev *poll_dev;
#endif /* CONFIG_RT2X00_LIB_RFKILL */

View file

@ -1098,7 +1098,7 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
return;
/*
* Unregister rfkill.
* Unregister extra components.
*/
rt2x00rfkill_unregister(rt2x00dev);
@ -1139,17 +1139,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
__set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags);
/*
* Register the rfkill handler.
* Register the extra components.
*/
status = rt2x00rfkill_register(rt2x00dev);
if (status)
goto exit_unitialize;
rt2x00rfkill_register(rt2x00dev);
return 0;
exit_unitialize:
rt2x00lib_uninitialize(rt2x00dev);
exit:
rt2x00lib_free_ring_entries(rt2x00dev);
@ -1313,15 +1308,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
}
/*
* Allocatie rfkill.
*/
retval = rt2x00rfkill_allocate(rt2x00dev);
if (retval)
goto exit;
/*
* Open the debugfs entry.
* Register extra components.
*/
rt2x00rfkill_allocate(rt2x00dev);
rt2x00debug_register(rt2x00dev);
__set_bit(DEVICE_PRESENT, &rt2x00dev->flags);
@ -1350,13 +1339,9 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
rt2x00lib_uninitialize(rt2x00dev);
/*
* Close debugfs entry.
* Free extra components
*/
rt2x00debug_deregister(rt2x00dev);
/*
* Free rfkill
*/
rt2x00rfkill_free(rt2x00dev);
/*
@ -1395,11 +1380,15 @@ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
__set_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags);
/*
* Disable radio and unitialize all items
* that must be recreated on resume.
* Disable radio.
*/
rt2x00lib_stop(rt2x00dev);
rt2x00lib_uninitialize(rt2x00dev);
/*
* Suspend/disable extra components.
*/
rt2x00rfkill_suspend(rt2x00dev);
rt2x00debug_deregister(rt2x00dev);
exit:
@ -1422,9 +1411,10 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
NOTICE(rt2x00dev, "Waking up.\n");
/*
* Open the debugfs entry.
* Restore/enable extra components.
*/
rt2x00debug_register(rt2x00dev);
rt2x00rfkill_resume(rt2x00dev);
/*
* Only continue if mac80211 had open interfaces.

View file

@ -100,28 +100,36 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
* RFkill handlers.
*/
#ifdef CONFIG_RT2X00_LIB_RFKILL
int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev);
void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev);
void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev);
int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev);
void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev);
void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev);
void rt2x00rfkill_suspend(struct rt2x00_dev *rt2x00dev);
void rt2x00rfkill_resume(struct rt2x00_dev *rt2x00dev);
#else
static inline int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
{
return 0;
}
static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
{
}
static inline int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
static inline void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
{
return 0;
}
static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
{
}
static inline void rt2x00rfkill_suspend(struct rt2x00_dev *rt2x00dev)
{
}
static inline void rt2x00rfkill_resume(struct rt2x00_dev *rt2x00dev)
{
}
#endif /* CONFIG_RT2X00_LIB_RFKILL */
#endif /* RT2X00LIB_H */

View file

@@ -69,56 +69,81 @@ static void rt2x00rfkill_poll(struct input_polled_dev *poll_dev)
}
}
int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
{
int retval;
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
return;
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
return 0;
retval = rfkill_register(rt2x00dev->rfkill);
if (retval) {
if (rfkill_register(rt2x00dev->rfkill)) {
ERROR(rt2x00dev, "Failed to register rfkill handler.\n");
return retval;
return;
}
retval = input_register_polled_device(rt2x00dev->poll_dev);
if (retval) {
if (input_register_polled_device(rt2x00dev->poll_dev)) {
ERROR(rt2x00dev, "Failed to register polled device.\n");
rfkill_unregister(rt2x00dev->rfkill);
return retval;
return;
}
__set_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state);
/*
* Force initial poll which will detect the initial device state,
* and correctly sends the signal to the rfkill layer about this
* state.
*/
rt2x00rfkill_poll(rt2x00dev->poll_dev);
return 0;
}
void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
{
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
return;
input_unregister_polled_device(rt2x00dev->poll_dev);
rfkill_unregister(rt2x00dev->rfkill);
__clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state);
}
int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
static struct input_polled_dev *
rt2x00rfkill_allocate_polldev(struct rt2x00_dev *rt2x00dev)
{
struct device *device = wiphy_dev(rt2x00dev->hw->wiphy);
struct input_polled_dev *poll_dev;
poll_dev = input_allocate_polled_device();
if (!poll_dev)
return NULL;
poll_dev->private = rt2x00dev;
poll_dev->poll = rt2x00rfkill_poll;
poll_dev->poll_interval = RFKILL_POLL_INTERVAL;
poll_dev->input->name = rt2x00dev->ops->name;
poll_dev->input->phys = wiphy_name(rt2x00dev->hw->wiphy);
poll_dev->input->id.bustype = BUS_HOST;
poll_dev->input->id.vendor = 0x1814;
poll_dev->input->id.product = rt2x00dev->chip.rt;
poll_dev->input->id.version = rt2x00dev->chip.rev;
poll_dev->input->dev.parent = wiphy_dev(rt2x00dev->hw->wiphy);
poll_dev->input->evbit[0] = BIT(EV_KEY);
set_bit(KEY_WLAN, poll_dev->input->keybit);
return poll_dev;
}
void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
{
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
return 0;
return;
rt2x00dev->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
rt2x00dev->rfkill =
rfkill_allocate(wiphy_dev(rt2x00dev->hw->wiphy), RFKILL_TYPE_WLAN);
if (!rt2x00dev->rfkill) {
ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
goto exit;
return;
}
rt2x00dev->rfkill->name = rt2x00dev->ops->name;
@@ -126,40 +151,49 @@ int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->rfkill->state = -1;
rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
rt2x00dev->poll_dev = input_allocate_polled_device();
rt2x00dev->poll_dev = rt2x00rfkill_allocate_polldev(rt2x00dev);
if (!rt2x00dev->poll_dev) {
ERROR(rt2x00dev, "Failed to allocate polled device.\n");
goto exit_free_rfkill;
rfkill_free(rt2x00dev->rfkill);
rt2x00dev->rfkill = NULL;
return;
}
rt2x00dev->poll_dev->private = rt2x00dev;
rt2x00dev->poll_dev->poll = rt2x00rfkill_poll;
rt2x00dev->poll_dev->poll_interval = RFKILL_POLL_INTERVAL;
rt2x00dev->poll_dev->input->name = rt2x00dev->ops->name;
rt2x00dev->poll_dev->input->phys = wiphy_name(rt2x00dev->hw->wiphy);
rt2x00dev->poll_dev->input->id.bustype = BUS_HOST;
rt2x00dev->poll_dev->input->id.vendor = 0x1814;
rt2x00dev->poll_dev->input->id.product = rt2x00dev->chip.rt;
rt2x00dev->poll_dev->input->id.version = rt2x00dev->chip.rev;
rt2x00dev->poll_dev->input->dev.parent = device;
rt2x00dev->poll_dev->input->evbit[0] = BIT(EV_KEY);
set_bit(KEY_WLAN, rt2x00dev->poll_dev->input->keybit);
return 0;
exit_free_rfkill:
rfkill_free(rt2x00dev->rfkill);
exit:
return -ENOMEM;
return;
}
void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
{
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
return;
input_free_polled_device(rt2x00dev->poll_dev);
rt2x00dev->poll_dev = NULL;
rfkill_free(rt2x00dev->rfkill);
rt2x00dev->rfkill = NULL;
}
void rt2x00rfkill_suspend(struct rt2x00_dev *rt2x00dev)
{
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
return;
input_free_polled_device(rt2x00dev->poll_dev);
rt2x00dev->poll_dev = NULL;
}
void rt2x00rfkill_resume(struct rt2x00_dev *rt2x00dev)
{
if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) ||
!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
return;
rt2x00dev->poll_dev = rt2x00rfkill_allocate_polldev(rt2x00dev);
if (!rt2x00dev->poll_dev) {
ERROR(rt2x00dev, "Failed to allocate polled device.\n");
return;
}
}
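The rfkill helpers above drop their return values and instead record progress in rt2x00dev->rfkill_state, so each later stage (register, suspend, free) can simply bail out if the earlier stage never completed. Below is a minimal userspace sketch of that guard pattern; the plain bit helpers stand in for the kernel's test_bit()/__set_bit()/__clear_bit() and the stage bodies are invented, so treat it as an illustration of the idiom rather than driver code.

/* Non-atomic stand-ins for the kernel bit helpers, for illustration only. */
#include <stdio.h>

#define RFKILL_STATE_ALLOCATED  1
#define RFKILL_STATE_REGISTERED 2

static void set_bit_ul(int nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit_ul(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static int  test_bit_ul(int nr, const unsigned long *addr) { return (*addr >> nr) & 1; }

static unsigned long rfkill_state;

static void rfkill_register_sketch(void)
{
        if (!test_bit_ul(RFKILL_STATE_ALLOCATED, &rfkill_state))
                return;                 /* nothing was allocated: nothing to register */
        /* ... register the real handlers here ... */
        set_bit_ul(RFKILL_STATE_REGISTERED, &rfkill_state);
}

static void rfkill_unregister_sketch(void)
{
        if (!test_bit_ul(RFKILL_STATE_REGISTERED, &rfkill_state))
                return;                 /* never registered: bail out quietly */
        /* ... unregister the real handlers here ... */
        clear_bit_ul(RFKILL_STATE_REGISTERED, &rfkill_state);
}

int main(void)
{
        rfkill_unregister_sketch();     /* safe even before allocation */
        set_bit_ul(RFKILL_STATE_ALLOCATED, &rfkill_state);
        rfkill_register_sketch();
        rfkill_unregister_sketch();
        printf("final state: %#lx\n", rfkill_state);
        return 0;
}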

View file

@@ -561,7 +561,7 @@ void ioc3_unregister_submodule(struct ioc3_submodule *is)
printk(KERN_WARNING
"%s: IOC3 submodule %s remove failed "
"for pci_dev %s.\n",
__FUNCTION__, module_name(is->owner),
__func__, module_name(is->owner),
pci_name(idd->pdev));
idd->active[is->id] = 0;
if(is->irq_mask)
@@ -611,7 +611,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
if ((ret = pci_enable_device(pdev))) {
printk(KERN_WARNING
"%s: Failed to enable IOC3 device for pci_dev %s.\n",
__FUNCTION__, pci_name(pdev));
__func__, pci_name(pdev));
goto out;
}
pci_set_master(pdev);
@@ -623,7 +623,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
if (ret < 0) {
printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA "
"for consistent allocations\n",
__FUNCTION__);
__func__);
}
}
#endif
@@ -633,7 +633,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
if (!idd) {
printk(KERN_WARNING
"%s: Failed to allocate IOC3 data for pci_dev %s.\n",
__FUNCTION__, pci_name(pdev));
__func__, pci_name(pdev));
ret = -ENODEV;
goto out_idd;
}
@@ -649,7 +649,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
printk(KERN_WARNING
"%s: Unable to find IOC3 resource "
"for pci_dev %s.\n",
__FUNCTION__, pci_name(pdev));
__func__, pci_name(pdev));
ret = -ENODEV;
goto out_pci;
}
@@ -657,7 +657,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
printk(KERN_WARNING
"%s: Unable to request IOC3 region "
"for pci_dev %s.\n",
__FUNCTION__, pci_name(pdev));
__func__, pci_name(pdev));
ret = -ENODEV;
goto out_pci;
}
@@ -666,7 +666,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
printk(KERN_WARNING
"%s: Unable to remap IOC3 region "
"for pci_dev %s.\n",
__FUNCTION__, pci_name(pdev));
__func__, pci_name(pdev));
ret = -ENODEV;
goto out_misc_region;
}
@@ -709,7 +709,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
} else {
printk(KERN_WARNING
"%s : request_irq fails for IRQ 0x%x\n ",
__FUNCTION__, pdev->irq);
__func__, pdev->irq);
}
if (!request_irq(pdev->irq+2, ioc3_intr_io, IRQF_SHARED,
"ioc3-io", (void *)idd)) {
@@ -717,7 +717,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
} else {
printk(KERN_WARNING
"%s : request_irq fails for IRQ 0x%x\n ",
__FUNCTION__, pdev->irq+2);
__func__, pdev->irq+2);
}
} else {
if (!request_irq(pdev->irq, ioc3_intr_io, IRQF_SHARED,
@@ -726,7 +726,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
} else {
printk(KERN_WARNING
"%s : request_irq fails for IRQ 0x%x\n ",
__FUNCTION__, pdev->irq);
__func__, pdev->irq);
}
}
@@ -769,7 +769,7 @@ static void ioc3_remove(struct pci_dev *pdev)
printk(KERN_WARNING
"%s: IOC3 submodule 0x%s remove failed "
"for pci_dev %s.\n",
__FUNCTION__,
__func__,
module_name(ioc3_submodules[id]->owner),
pci_name(pdev));
idd->active[id] = 0;
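The ioc3 hunks above are a mechanical rename: __FUNCTION__ is a GCC-specific spelling, while __func__ is the predefined identifier standardised in C99, so the messages printed are unchanged. A tiny standalone example:

/* __func__ expands to the enclosing function's name, here "probe_example". */
#include <stdio.h>

static void probe_example(void)
{
        printf("%s: device probe failed\n", __func__);
}

int main(void)
{
        probe_example();
        return 0;
}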

View file

@@ -632,7 +632,7 @@ static int nfs_verify_server_address(struct sockaddr *addr)
switch (addr->sa_family) {
case AF_INET: {
struct sockaddr_in *sa = (struct sockaddr_in *)addr;
return sa->sin_addr.s_addr != INADDR_ANY;
return sa->sin_addr.s_addr != htonl(INADDR_ANY);
}
case AF_INET6: {
struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
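INADDR_ANY (0.0.0.0) and INADDR_BROADCAST (255.255.255.255) keep the same bit pattern under byte swapping, so wrapping them in htonl() here and in the similar hunks below does not change behaviour; it makes the comparison against a __be32 field type-correct so sparse's endianness checking stays quiet. The short demo below, using INADDR_LOOPBACK as a constant that is not byte-order symmetric, shows what htonl() actually does on a little-endian host:

/* Host- vs network-byte-order demo for the htonl(INADDR_*) conversions. */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
        printf("INADDR_ANY:       0x%08x -> htonl: 0x%08x\n",
               (unsigned)INADDR_ANY, (unsigned)htonl(INADDR_ANY));
        printf("INADDR_BROADCAST: 0x%08x -> htonl: 0x%08x\n",
               (unsigned)INADDR_BROADCAST, (unsigned)htonl(INADDR_BROADCAST));
        printf("INADDR_LOOPBACK:  0x%08x -> htonl: 0x%08x\n",
               (unsigned)INADDR_LOOPBACK, (unsigned)htonl(INADDR_LOOPBACK));
        return 0;
}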

View file

@@ -2290,7 +2290,7 @@ static const struct pid_entry tgid_base_stuff[] = {
DIR("fd", S_IRUSR|S_IXUSR, fd),
DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
#ifdef CONFIG_NET
DIR("net", S_IRUGO|S_IXUSR, net),
DIR("net", S_IRUGO|S_IXUGO, net),
#endif
REG("environ", S_IRUSR, environ),
INF("auxv", S_IRUSR, pid_auxv),

View file

@@ -265,7 +265,7 @@ static inline bool ipv4_is_local_multicast(__be32 addr)
static inline bool ipv4_is_lbcast(__be32 addr)
{
/* limited broadcast */
return addr == INADDR_BROADCAST;
return addr == htonl(INADDR_BROADCAST);
}
static inline bool ipv4_is_zeronet(__be32 addr)

View file

@@ -201,8 +201,8 @@ enum
struct tc_u32_key
{
__u32 mask;
__u32 val;
__be32 mask;
__be32 val;
int off;
int offmask;
};
@@ -213,12 +213,12 @@ struct tc_u32_sel
unsigned char offshift;
unsigned char nkeys;
__u16 offmask;
__be16 offmask;
__u16 off;
short offoff;
short hoff;
__u32 hmask;
__be32 hmask;
struct tc_u32_key keys[0];
};
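tc_u32_key and tc_u32_sel hold values and masks that are compared directly against words read out of the packet, so both sides live in network byte order; the __be32/__be16 annotations let sparse enforce that, and the matching code in cls_u32.c (further below) only converts with ntohl()/ntohs() where a host-order number is genuinely needed, e.g. for hashing. A small sketch of the masked-compare idiom, with made-up key values:

/* Sketch of the u32 classifier's masked compare: packet word, value and mask
 * all stay in network byte order. Values below are invented for the demo. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int u32_key_match(uint32_t pkt_word_be, uint32_t val_be, uint32_t mask_be)
{
        /* same shape as cls_u32: XOR then mask, zero means "matches" */
        return !((pkt_word_be ^ val_be) & mask_be);
}

int main(void)
{
        /* pretend this is the first 32-bit word of an IPv4 header (version/IHL,
         * ToS, total length) read straight off the wire */
        uint32_t word = htonl(0x45000054);
        uint32_t val  = htonl(0x45000000);   /* match version/IHL == 0x45 */
        uint32_t mask = htonl(0xff000000);   /* only the first byte */

        printf("match: %d\n", u32_key_match(word, val, mask));  /* prints 1 */
        return 0;
}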

View file

@@ -380,15 +380,19 @@ static inline int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
int sctp_v6_init(void);
void sctp_v6_exit(void);
void sctp_v6_pf_init(void);
void sctp_v6_pf_exit(void);
int sctp_v6_protosw_init(void);
void sctp_v6_protosw_exit(void);
int sctp_v6_add_protocol(void);
void sctp_v6_del_protocol(void);
#else /* #ifdef defined(CONFIG_IPV6) */
static inline int sctp_v6_init(void) { return 0; }
static inline void sctp_v6_exit(void) { return; }
static inline void sctp_v6_pf_init(void) { return 0; }
static inline void sctp_v6_pf_exit(void) { return; }
static inline int sctp_v6_protosw_init(void) { return 0; }
static inline void sctp_v6_protosw_exit(void) { return; }
static inline int sctp_v6_add_protocol(void) { return 0; }
static inline void sctp_v6_del_protocol(void) { return; }

View file

@@ -277,7 +277,7 @@ extern int __xfrm_state_delete(struct xfrm_state *x);
struct xfrm_state_afinfo {
unsigned int family;
unsigned int proto;
unsigned int eth_proto;
__be16 eth_proto;
struct module *owner;
const struct xfrm_type *type_map[IPPROTO_MAX];
struct xfrm_mode *mode_map[XFRM_MODE_MAX];

View file

@@ -78,9 +78,13 @@ static int audit_default;
/* If auditing cannot proceed, audit_failure selects what happens. */
static int audit_failure = AUDIT_FAIL_PRINTK;
/* If audit records are to be written to the netlink socket, audit_pid
* contains the (non-zero) pid. */
/*
* If audit records are to be written to the netlink socket, audit_pid
* contains the pid of the auditd process and audit_nlk_pid contains
* the pid to use to send netlink messages to that process.
*/
int audit_pid;
static int audit_nlk_pid;
/* If audit_rate_limit is non-zero, limit the rate of sending audit records
* to that number per second. This prevents DoS attacks, but results in
@@ -350,7 +354,7 @@ static int kauditd_thread(void *dummy)
wake_up(&audit_backlog_wait);
if (skb) {
if (audit_pid) {
int err = netlink_unicast(audit_sock, skb, audit_pid, 0);
int err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
if (err < 0) {
BUG_ON(err != -ECONNREFUSED); /* Shoudn't happen */
printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
@@ -626,6 +630,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
sid, 1);
audit_pid = new_pid;
audit_nlk_pid = NETLINK_CB(skb).pid;
}
if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
err = audit_set_rate_limit(status_get->rate_limit,

View file

@@ -382,7 +382,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_dev_info(dev)->cnt_encap_on_xmit++;
pr_debug("%s: proto to encap: 0x%hx\n",
__FUNCTION__, htons(veth->h_vlan_proto));
__FUNCTION__, ntohs(veth->h_vlan_proto));
/* Construct the second two bytes. This field looks something
* like:
* usr_priority: 3 bits (high bits)

View file

@@ -136,7 +136,7 @@ void br_fdb_cleanup(unsigned long _data)
this_timer = f->ageing_timer + delay;
if (time_before_eq(this_timer, jiffies))
fdb_delete(f);
else if (this_timer < next_timer)
else if (time_before(this_timer, next_timer))
next_timer = this_timer;
}
}
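jiffies wraps, so comparing two expiry times with a plain '<' misorders timestamps that straddle the wrap point; time_before() subtracts first and tests the sign, which is what the hunk above switches the next_timer selection to. A userspace sketch with 32-bit counters, mirroring the spirit of the kernel macro rather than its exact definition:

#include <stdio.h>
#include <stdint.h>

/* mirrors the signed-difference trick behind time_before(a, b) */
static int time_before32(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        uint32_t now  = 0xfffffff0u;    /* just before the counter wraps */
        uint32_t then = now + 0x40;     /* 0x30: already wrapped past zero */

        printf("naive now < then:       %d\n", now < then);               /* 0: wrong */
        printf("time_before(now, then): %d\n", time_before32(now, then)); /* 1: right */
        return 0;
}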

View file

@@ -215,10 +215,12 @@ static void zap_completion_queue(void)
while (clist != NULL) {
struct sk_buff *skb = clist;
clist = clist->next;
if (skb->destructor)
if (skb->destructor) {
atomic_inc(&skb->users);
dev_kfree_skb_any(skb); /* put this one back */
else
} else {
__kfree_skb(skb);
}
}
}

View file

@@ -458,7 +458,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = -EADDRNOTAVAIL;
if (!sysctl_ip_nonlocal_bind &&
!inet->freebind &&
addr->sin_addr.s_addr != INADDR_ANY &&
addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
chk_addr_ret != RTN_BROADCAST)

View file

@@ -168,7 +168,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_encap_tmpl *encap = x->encap;
struct udphdr *uh;
__be32 *udpdata32;
unsigned int sport, dport;
__be16 sport, dport;
int encap_type;
spin_lock_bh(&x->lock);

View file

@@ -583,7 +583,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
}
if (!mreq.imr_ifindex) {
if (mreq.imr_address.s_addr == INADDR_ANY) {
if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
inet->mc_index = 0;
inet->mc_addr = 0;
err = 0;

View file

@@ -103,6 +103,7 @@
- '3' from resolv.h */
#define NONE __constant_htonl(INADDR_NONE)
#define ANY __constant_htonl(INADDR_ANY)
/*
* Public IP configuration
@@ -1479,19 +1480,19 @@ static int __init ip_auto_config_setup(char *addrs)
DBG(("IP-Config: Parameter #%d: `%s'\n", num, ip));
switch (num) {
case 0:
if ((ic_myaddr = in_aton(ip)) == INADDR_ANY)
if ((ic_myaddr = in_aton(ip)) == ANY)
ic_myaddr = NONE;
break;
case 1:
if ((ic_servaddr = in_aton(ip)) == INADDR_ANY)
if ((ic_servaddr = in_aton(ip)) == ANY)
ic_servaddr = NONE;
break;
case 2:
if ((ic_gateway = in_aton(ip)) == INADDR_ANY)
if ((ic_gateway = in_aton(ip)) == ANY)
ic_gateway = NONE;
break;
case 3:
if ((ic_netmask = in_aton(ip)) == INADDR_ANY)
if ((ic_netmask = in_aton(ip)) == ANY)
ic_netmask = NONE;
break;
case 4:

View file

@@ -252,6 +252,8 @@ recent_mt_check(const char *tablename, const void *ip,
if ((info->check_set & (IPT_RECENT_SET | IPT_RECENT_REMOVE)) &&
(info->seconds || info->hit_count))
return false;
if (info->hit_count > ip_pkt_list_tot)
return false;
if (info->name[0] == '\0' ||
strnlen(info->name, IPT_RECENT_NAME_LEN) == IPT_RECENT_NAME_LEN)
return false;

View file

@@ -255,7 +255,7 @@ static u16 tcp_select_window(struct sock *sk)
*
* Relax Will Robinson.
*/
new_win = cur_win;
new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
}
tp->rcv_wnd = new_win;
tp->rcv_wup = tp->rcv_nxt;
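With window scaling the TCP header carries window >> rcv_wscale, so a window that is not a multiple of (1 << rcv_wscale) is silently truncated and can end up below what was previously advertised, i.e. the window shrinks. Rounding up with ALIGN() avoids that, which is what the tcp_select_window() hunk above does. A worked example with illustrative numbers:

#include <stdio.h>

/* round x up to the next multiple of a (a must be a power of two) */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int cur_win = 5300;    /* previously advertised window, in bytes */
        unsigned int wscale  = 7;       /* rcv_wscale: window sent in units of 128 */
        unsigned int unit    = 1u << wscale;

        unsigned int truncated = (cur_win >> wscale) << wscale;  /* 5248: shrank */
        unsigned int aligned   = ALIGN(cur_win, unit);           /* 5376: >= cur_win */

        printf("truncated: %u, aligned: %u\n", truncated, aligned);
        return 0;
}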

View file

@@ -179,11 +179,12 @@ config IPV6_SIT
Saying M here will produce a module called sit.ko. If unsure, say Y.
config IPV6_TUNNEL
tristate "IPv6: IPv6-in-IPv6 tunnel"
tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)"
select INET6_TUNNEL
depends on IPV6
---help---
Support for IPv6-in-IPv6 tunnels described in RFC 2473.
Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in
RFC 2473.
If unsure, say N.

View file

@@ -842,7 +842,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
set_h225_addr = rcu_dereference(set_h225_addr_hook);
if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
(set_h225_addr) && ct->status && IPS_NAT_MASK &&
(set_h225_addr) && ct->status & IPS_NAT_MASK &&
get_h225_addr(ct, *data, &setup->destCallSignalAddress,
&addr, &port) &&
memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
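The fix above replaces a logical AND with the intended bitwise test: ct->status && IPS_NAT_MASK is true whenever both operands are non-zero (so for practically every connection), whereas ct->status & IPS_NAT_MASK is non-zero only when a NAT bit is actually set. A demo with invented flag values; only the operator behaviour matters:

#include <stdio.h>

#define STATUS_CONFIRMED (1u << 0)
#define STATUS_NAT_MASK  (1u << 4)      /* stand-in for IPS_NAT_MASK */

int main(void)
{
        unsigned int status = STATUS_CONFIRMED;   /* confirmed, but *not* NATed */

        printf("status && MASK: %d\n", status && STATUS_NAT_MASK);         /* 1: always true */
        printf("status &  MASK: %d\n", (status & STATUS_NAT_MASK) != 0);   /* 0: correct */
        return 0;
}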

View file

@@ -89,7 +89,7 @@ static const struct tcf_ext_map u32_ext_map = {
static struct tc_u_common *u32_list;
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
{
unsigned h = ntohl(key & sel->hmask)>>fshift;
@@ -137,7 +137,7 @@ next_knode:
for (i = n->sel.nkeys; i>0; i--, key++) {
if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
n = n->next;
goto next_knode;
}
@@ -182,7 +182,7 @@ check_terminal:
ht = n->ht_down;
sel = 0;
if (ht->divisor)
sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
goto next_ht;
@@ -190,7 +190,7 @@ check_terminal:
if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags&TC_U32_VAROFFSET)
off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
off2 &= ~3;
}
if (n->sel.flags&TC_U32_EAT) {

View file

@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0;
return !(((*(u32*) ptr) ^ key->val) & key->mask);
return !(((*(__be32*) ptr) ^ key->val) & key->mask);
}
static struct tcf_ematch_ops em_u32_ops = {

View file

@@ -944,7 +944,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
static struct sctp_association *__sctp_rcv_asconf_lookup(
sctp_chunkhdr_t *ch,
const union sctp_addr *laddr,
__be32 peer_port,
__be16 peer_port,
struct sctp_transport **transportp)
{
sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;

View file

@@ -1014,15 +1014,24 @@ static struct sctp_pf sctp_pf_inet6 = {
};
/* Initialize IPv6 support and register with socket layer. */
int sctp_v6_init(void)
void sctp_v6_pf_init(void)
{
int rc;
/* Register the SCTP specific PF_INET6 functions. */
sctp_register_pf(&sctp_pf_inet6, PF_INET6);
/* Register the SCTP specific AF_INET6 functions. */
sctp_register_af(&sctp_af_inet6);
}
void sctp_v6_pf_exit(void)
{
list_del(&sctp_af_inet6.list);
}
/* Initialize IPv6 support and register with socket layer. */
int sctp_v6_protosw_init(void)
{
int rc;
rc = proto_register(&sctpv6_prot, 1);
if (rc)
@@ -1035,6 +1044,14 @@ int sctp_v6_init(void)
return 0;
}
void sctp_v6_protosw_exit(void)
{
inet6_unregister_protosw(&sctpv6_seqpacket_protosw);
inet6_unregister_protosw(&sctpv6_stream_protosw);
proto_unregister(&sctpv6_prot);
}
/* Register with inet6 layer. */
int sctp_v6_add_protocol(void)
{
@@ -1047,15 +1064,6 @@ int sctp_v6_add_protocol(void)
return 0;
}
/* IPv6 specific exit support. */
void sctp_v6_exit(void)
{
inet6_unregister_protosw(&sctpv6_seqpacket_protosw);
inet6_unregister_protosw(&sctpv6_stream_protosw);
proto_unregister(&sctpv6_prot);
list_del(&sctp_af_inet6.list);
}
/* Unregister with inet6 layer. */
void sctp_v6_del_protocol(void)
{

View file

@@ -337,14 +337,14 @@ static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
{
addr->v4.sin_family = AF_INET;
addr->v4.sin_addr.s_addr = INADDR_ANY;
addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
addr->v4.sin_port = port;
}
/* Is this a wildcard address? */
static int sctp_v4_is_any(const union sctp_addr *addr)
{
return INADDR_ANY == addr->v4.sin_addr.s_addr;
return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
}
/* This function checks if the address is a valid address to be used for
@@ -375,7 +375,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr);
if (addr->v4.sin_addr.s_addr != INADDR_ANY &&
if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
ret != RTN_LOCAL &&
!sp->inet.freebind &&
!sysctl_ip_nonlocal_bind)
@@ -785,8 +785,8 @@ static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
/* PF_INET only supports AF_INET addresses. */
if (addr1->sa.sa_family != addr2->sa.sa_family)
return 0;
if (INADDR_ANY == addr1->v4.sin_addr.s_addr ||
INADDR_ANY == addr2->v4.sin_addr.s_addr)
if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
return 1;
if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
return 1;
@@ -992,6 +992,58 @@ static void cleanup_sctp_mibs(void)
free_percpu(sctp_statistics[1]);
}
static void sctp_v4_pf_init(void)
{
/* Initialize the SCTP specific PF functions. */
sctp_register_pf(&sctp_pf_inet, PF_INET);
sctp_register_af(&sctp_af_inet);
}
static void sctp_v4_pf_exit(void)
{
list_del(&sctp_af_inet.list);
}
static int sctp_v4_protosw_init(void)
{
int rc;
rc = proto_register(&sctp_prot, 1);
if (rc)
return rc;
/* Register SCTP(UDP and TCP style) with socket layer. */
inet_register_protosw(&sctp_seqpacket_protosw);
inet_register_protosw(&sctp_stream_protosw);
return 0;
}
static void sctp_v4_protosw_exit(void)
{
inet_unregister_protosw(&sctp_stream_protosw);
inet_unregister_protosw(&sctp_seqpacket_protosw);
proto_unregister(&sctp_prot);
}
static int sctp_v4_add_protocol(void)
{
/* Register notifier for inet address additions/deletions. */
register_inetaddr_notifier(&sctp_inetaddr_notifier);
/* Register SCTP with inet layer. */
if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
return -EAGAIN;
return 0;
}
static void sctp_v4_del_protocol(void)
{
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
}
/* Initialize the universe into something sensible. */
SCTP_STATIC __init int sctp_init(void)
{
@@ -1035,8 +1087,6 @@ SCTP_STATIC __init int sctp_init(void)
/* Initialize object count debugging. */
sctp_dbg_objcnt_init();
/* Initialize the SCTP specific PF functions. */
sctp_register_pf(&sctp_pf_inet, PF_INET);
/*
* 14. Suggested SCTP Protocol Parameter Values
*/
@@ -1194,19 +1244,22 @@ SCTP_STATIC __init int sctp_init(void)
sctp_sysctl_register();
INIT_LIST_HEAD(&sctp_address_families);
sctp_register_af(&sctp_af_inet);
sctp_v4_pf_init();
sctp_v6_pf_init();
/* Initialize the local address list. */
INIT_LIST_HEAD(&sctp_local_addr_list);
spin_lock_init(&sctp_local_addr_lock);
sctp_get_local_addr_list();
status = sctp_v4_protosw_init();
status = proto_register(&sctp_prot, 1);
if (status)
goto err_proto_register;
goto err_protosw_init;
/* Register SCTP(UDP and TCP style) with socket layer. */
inet_register_protosw(&sctp_seqpacket_protosw);
inet_register_protosw(&sctp_stream_protosw);
status = sctp_v6_init();
status = sctp_v6_protosw_init();
if (status)
goto err_v6_init;
goto err_v6_protosw_init;
/* Initialize the control inode/socket for handling OOTB packets. */
if ((status = sctp_ctl_sock_init())) {
@@ -1215,19 +1268,9 @@ SCTP_STATIC __init int sctp_init(void)
goto err_ctl_sock_init;
}
/* Initialize the local address list. */
INIT_LIST_HEAD(&sctp_local_addr_list);
spin_lock_init(&sctp_local_addr_lock);
sctp_get_local_addr_list();
/* Register notifier for inet address additions/deletions. */
register_inetaddr_notifier(&sctp_inetaddr_notifier);
/* Register SCTP with inet layer. */
if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) {
status = -EAGAIN;
status = sctp_v4_add_protocol();
if (status)
goto err_add_protocol;
}
/* Register SCTP with inet6 layer. */
status = sctp_v6_add_protocol();
@@ -1238,18 +1281,18 @@ SCTP_STATIC __init int sctp_init(void)
out:
return status;
err_v6_add_protocol:
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
sctp_v6_del_protocol();
err_add_protocol:
sctp_free_local_addr_list();
sctp_v4_del_protocol();
sock_release(sctp_ctl_socket);
err_ctl_sock_init:
sctp_v6_exit();
err_v6_init:
inet_unregister_protosw(&sctp_stream_protosw);
inet_unregister_protosw(&sctp_seqpacket_protosw);
proto_unregister(&sctp_prot);
err_proto_register:
sctp_v6_protosw_exit();
err_v6_protosw_init:
sctp_v4_protosw_exit();
err_protosw_init:
sctp_free_local_addr_list();
sctp_v4_pf_exit();
sctp_v6_pf_exit();
sctp_sysctl_unregister();
list_del(&sctp_af_inet.list);
free_pages((unsigned long)sctp_port_hashtable,
@@ -1282,23 +1325,21 @@ SCTP_STATIC __exit void sctp_exit(void)
/* Unregister with inet6/inet layers. */
sctp_v6_del_protocol();
inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
/* Unregister notifier for inet address additions/deletions. */
unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
/* Free the local address list. */
sctp_free_local_addr_list();
sctp_v4_del_protocol();
/* Free the control endpoint. */
sock_release(sctp_ctl_socket);
/* Cleanup v6 initializations. */
sctp_v6_exit();
/* Free protosw registrations */
sctp_v6_protosw_exit();
sctp_v4_protosw_exit();
/* Free the local address list. */
sctp_free_local_addr_list();
/* Unregister with socket layer. */
inet_unregister_protosw(&sctp_stream_protosw);
inet_unregister_protosw(&sctp_seqpacket_protosw);
sctp_v6_pf_exit();
sctp_v4_pf_exit();
sctp_sysctl_unregister();
list_del(&sctp_af_inet.list);
@@ -1317,8 +1358,6 @@ SCTP_STATIC __exit void sctp_exit(void)
kmem_cache_destroy(sctp_chunk_cachep);
kmem_cache_destroy(sctp_bucket_cachep);
proto_unregister(&sctp_prot);
}
module_init(sctp_init);
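The protosw/protocol split above is about ordering: the PF/AF glue and the proto/protosw entries are registered first, and the IPPROTO_SCTP input handlers (sctp_v4_add_protocol / sctp_v6_add_protocol) are installed last, so no packet can reach SCTP before the socket-layer pieces exist; teardown runs in the reverse order. A small userspace sketch of that sequencing, with invented stub stages where only the order mirrors the diff:

#include <stdio.h>

static void pf_init(void)      { puts("pf/af registered"); }
static void pf_exit(void)      { puts("pf/af unregistered"); }
static int  protosw_init(void) { puts("proto + protosw registered"); return 0; }
static void protosw_exit(void) { puts("proto + protosw unregistered"); }
static int  add_protocol(void) { puts("IPPROTO handler installed"); return 0; }
static void del_protocol(void) { puts("IPPROTO handler removed"); }

static int sctp_init_sketch(void)
{
        int status;

        pf_init();
        status = protosw_init();
        if (status)
                goto err_protosw;
        status = add_protocol();        /* last: packets may now arrive */
        if (status)
                goto err_add_protocol;
        return 0;

err_add_protocol:
        protosw_exit();
err_protosw:
        pf_exit();
        return status;
}

static void sctp_exit_sketch(void)
{
        del_protocol();                 /* first: stop new packets */
        protosw_exit();
        pf_exit();
}

int main(void)
{
        if (!sctp_init_sketch())
                sctp_exit_sketch();
        return 0;
}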

View file

@@ -317,7 +317,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
if (!*context_handle)
return(GSS_S_NO_CONTEXT);
if ((*context_handle)->internal_ctx_id != 0)
if ((*context_handle)->internal_ctx_id)
(*context_handle)->mech_type->gm_ops
->gss_delete_sec_context((*context_handle)
->internal_ctx_id);

View file

@@ -185,7 +185,7 @@ int svc_create_xprt(struct svc_serv *serv, char *xprt_name, unsigned short port,
struct svc_xprt_class *xcl;
struct sockaddr_in sin = {
.sin_family = AF_INET,
.sin_addr.s_addr = INADDR_ANY,
.sin_addr.s_addr = htonl(INADDR_ANY),
.sin_port = htons(port),
};
dprintk("svc: creating transport %s[%d]\n", xprt_name, port);