Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

David S. Miller 2011-03-15 15:15:17 -07:00
commit c337ffb68e
13 changed files with 70 additions and 45 deletions


@@ -2558,18 +2558,15 @@ enslaved.
 16. Resources and Links
 =======================
 
 The latest version of the bonding driver can be found in the latest
 version of the linux kernel, found on http://kernel.org
 
-The latest version of this document can be found in either the latest
-kernel source (named Documentation/networking/bonding.txt), or on the
-bonding sourceforge site:
-
-http://www.sourceforge.net/projects/bonding
+The latest version of this document can be found in the latest kernel
+source (named Documentation/networking/bonding.txt).
 
-Discussions regarding the bonding driver take place primarily on the
-bonding-devel mailing list, hosted at sourceforge.net. If you have
-questions or problems, post them to the list. The list address is:
+Discussions regarding the usage of the bonding driver take place on the
+bonding-devel mailing list, hosted at sourceforge.net. If you have questions or
+problems, post them to the list. The list address is:
 
 bonding-devel@lists.sourceforge.net
@@ -2578,6 +2575,17 @@ be found at:
 
 https://lists.sourceforge.net/lists/listinfo/bonding-devel
 
+Discussions regarding the development of the bonding driver take place
+on the main Linux network mailing list, hosted at vger.kernel.org. The list
+address is:
+
+netdev@vger.kernel.org
+
+The administrative interface (to subscribe or unsubscribe) can
+be found at:
+
+http://vger.kernel.org/vger-lists.html#netdev
+
 Donald Becker's Ethernet Drivers and diag programs may be found at :
  - http://web.archive.org/web/*/http://www.scyld.com/network/


@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
 	int csr0, boguscnt;
 	int handled = 0;
 
-	if (dev == NULL) {
-		printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
-		return IRQ_NONE;
-	}
-
 	lance->RAP = CSR0;		/* PCnet-ISA Controller Status */
 	if (!(lance->RDP & INTR))	/* Check if any interrupt has been */


@@ -4229,7 +4229,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
 	for_each_eth_queue(bp, i)
 		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
-					 bp->rx_ticks, bp->tx_ticks);
+					 bp->tx_ticks, bp->rx_ticks);
 }
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)


@@ -659,7 +659,7 @@ failed:
 static void unlink_all_urbs(struct esd_usb2 *dev)
 {
 	struct esd_usb2_net_priv *priv;
-	int i;
+	int i, j;
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
 	for (i = 0; i < dev->net_count; i++) {
@@ -668,8 +668,8 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
 			usb_kill_anchored_urbs(&priv->tx_submitted);
 			atomic_set(&priv->active_tx_jobs, 0);
 
-			for (i = 0; i < MAX_TX_URBS; i++)
-				priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+			for (j = 0; j < MAX_TX_URBS; j++)
+				priv->tx_contexts[j].echo_index = MAX_TX_URBS;
 		}
 	}
 }
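
The two hunks above fix a reused loop counter: the inner reset loop over tx_contexts ran on the same variable i as the outer loop over the net devices, so after the first device i was already MAX_TX_URBS and the outer loop ended early. A user-space sketch of the effect, with sizes and names invented for the illustration rather than taken from the driver:

#include <stdio.h>

#define NET_COUNT	3	/* pretend number of interfaces */
#define MAX_TX_URBS	4	/* pretend number of tx contexts */

/* Buggy variant: the inner loop clobbers the outer index. */
static void visit_buggy(void)
{
	int i;

	for (i = 0; i < NET_COUNT; i++) {
		printf("  buggy: device %d\n", i);
		for (i = 0; i < MAX_TX_URBS; i++)
			;	/* reset one tx context */
		/* here i == MAX_TX_URBS, so the outer condition fails */
	}
}

/* Fixed variant: a separate index for the inner loop. */
static void visit_fixed(void)
{
	int i, j;

	for (i = 0; i < NET_COUNT; i++) {
		printf("  fixed: device %d\n", i);
		for (j = 0; j < MAX_TX_URBS; j++)
			;	/* reset one tx context */
	}
}

int main(void)
{
	visit_buggy();	/* prints only "device 0" */
	visit_fixed();	/* prints devices 0, 1 and 2 */
	return 0;
}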


@@ -1730,7 +1730,7 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
 		emac_read(EMAC_TXCARRIERSENSE);
 	emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
 
-	ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
+	ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
 	emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
 
 	return &ndev->stats;
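
The TXUNDERRUN register is read and then explicitly cleared (the emac_write with stats_clear_mask), so each read only covers the interval since the previous clear. Assigning with '=' therefore discarded everything counted in earlier polls; '+=' keeps a running total, in line with the clear-after-read pattern of the surrounding counters. A rough user-space sketch of the difference; the register model is a stand-in, not the EMAC's:

#include <stdio.h>

static unsigned int hw_underrun;	/* stand-in for a counter cleared after each read */

static unsigned int read_and_clear(void)
{
	unsigned int v = hw_underrun;

	hw_underrun = 0;	/* mirrors the emac_write(..., stats_clear_mask) */
	return v;
}

int main(void)
{
	unsigned int per_poll[] = { 3, 2, 4 };	/* underruns seen in three polls */
	unsigned long assign_total = 0, accumulate_total = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int v;

		hw_underrun = per_poll[i];	/* hardware counts some events */
		v = read_and_clear();
		assign_total = v;		/* old code: keeps only the last poll */
		accumulate_total += v;		/* fixed code: running total */
	}

	printf("with '=' : %lu\n", assign_total);	/* 4 */
	printf("with '+=': %lu\n", accumulate_total);	/* 9 */
	return 0;
}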


@@ -219,9 +219,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 	const struct macvlan_dev *vlan = netdev_priv(dev);
 	const struct macvlan_port *port = vlan->port;
 	const struct macvlan_dev *dest;
+	__u8 ip_summed = skb->ip_summed;
 
 	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
 		const struct ethhdr *eth = (void *)skb->data;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		/* send to other bridge ports directly */
 		if (is_multicast_ether_addr(eth->h_dest)) {
@@ -241,6 +243,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 xmit_world:
+	skb->ip_summed = ip_summed;
 	skb_set_dev(skb, vlan->lowerdev);
 	return dev_queue_xmit(skb);
 }


@@ -791,8 +791,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
 		return -ENODEV;
 	}
 
-	SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
-		   phy_addr, phydev->addr, phydev->phy_id);
+	SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
+		   phydev->addr, phydev->phy_id);
 
 	ret = phy_connect_direct(dev, phydev,
 				 &smsc911x_phy_adjust_link, 0,


@@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	static const char ifname[] = "usbpn%d";
 	const struct usb_cdc_union_desc *union_header = NULL;
-	const struct usb_cdc_header_desc *phonet_header = NULL;
 	const struct usb_host_interface *data_desc;
 	struct usb_interface *data_intf;
 	struct usb_device *usbdev = interface_to_usbdev(intf);
 	struct net_device *dev;
 	struct usbpn_dev *pnd;
 	u8 *data;
+	int phonet = 0;
 	int len, err;
 
 	data = intf->altsetting->extra;
@@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 				(struct usb_cdc_union_desc *)data;
 			break;
 		case 0xAB:
-			if (phonet_header || dlen < 5)
-				break;
-			phonet_header =
-				(struct usb_cdc_header_desc *)data;
+			phonet = 1;
 			break;
 		}
 	}
@@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 		len -= dlen;
 	}
 
-	if (!union_header || !phonet_header)
+	if (!union_header || !phonet)
 		return -EINVAL;
 
 	data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
@@ -392,7 +389,6 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	pnd = netdev_priv(dev);
 	SET_NETDEV_DEV(dev, &intf->dev);
-	netif_stop_queue(dev);
 
 	pnd->dev = dev;
 	pnd->usb = usb_get_dev(usbdev);


@@ -122,7 +122,7 @@ struct netfront_info {
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 
 	/* Statistics */
-	int rx_gso_checksum_fixup;
+	unsigned long rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -1692,7 +1692,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
-		data[i] = *(int *)(np + xennet_stats[i].offset);
+		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
 }
 
 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
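
The two hunks above move the stat field in struct netfront_info and the pointer cast in xennet_get_ethtool_stats() to unsigned long together. Keeping the field and the cast the same size matters: a mismatched read returns garbage on 64-bit builds. The user-space sketch below shows the failure mode; the struct is invented for the demo and the oversized read is deliberately the kind of access one must not write:

#include <stdio.h>

struct demo_stats {
	int	rx_gso_checksum_fixup;	/* 4 bytes on an LP64 machine */
	int	neighbour;		/* whatever happens to follow it */
};

int main(void)
{
	struct demo_stats s = { .rx_gso_checksum_fixup = 7,
				.neighbour = 0x11223344 };
	unsigned long long data;

	/* matched access: the cast has the same width as the field */
	data = *(int *)&s.rx_gso_checksum_fixup;
	printf("matched read:    %llu\n", data);

	/* mismatched access: an 8-byte read over a 4-byte field picks up
	 * the neighbouring bytes as well (and is undefined behaviour) */
	data = *(unsigned long *)&s.rx_gso_checksum_fixup;
	printf("mismatched read: 0x%llx\n", data);

	return 0;
}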


@@ -375,12 +375,12 @@ static void br_make_forwarding(struct net_bridge_port *p)
 	if (p->state != BR_STATE_BLOCKING)
 		return;
 
-	if (br->forward_delay == 0) {
+	if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
 		p->state = BR_STATE_FORWARDING;
 		br_topology_change_detection(br);
 		del_timer(&p->forward_delay_timer);
 	}
-	else if (p->br->stp_enabled == BR_KERNEL_STP)
+	else if (br->stp_enabled == BR_KERNEL_STP)
 		p->state = BR_STATE_LISTENING;
 	else
 		p->state = BR_STATE_LEARNING;
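
The rewritten condition lets a port go straight to forwarding whenever STP is disabled, not only when forward_delay happens to be zero; with no spanning tree protocol running there is nothing to gain from the listening and learning delays. The decision can be read as a small pure function, sketched below with invented enum and function names (an illustration of the logic, not bridge code):

#include <stdio.h>

enum stp_mode   { NO_STP, KERNEL_STP, USER_STP };
enum port_state { BLOCKING, LISTENING, LEARNING, FORWARDING };

static enum port_state next_state(enum stp_mode stp, unsigned long forward_delay)
{
	if (stp == NO_STP || forward_delay == 0)
		return FORWARDING;	/* nothing to wait for */
	else if (stp == KERNEL_STP)
		return LISTENING;	/* kernel STP starts the state machine */
	else
		return LEARNING;	/* user-space STP variant */
}

static const char *name(enum port_state s)
{
	static const char * const names[] = {
		"blocking", "listening", "learning", "forwarding"
	};
	return names[s];
}

int main(void)
{
	printf("no STP,     delay 15 -> %s\n", name(next_state(NO_STP, 15)));
	printf("kernel STP, delay 15 -> %s\n", name(next_state(KERNEL_STP, 15)));
	printf("kernel STP, delay 0  -> %s\n", name(next_state(KERNEL_STP, 0)));
	printf("user STP,   delay 15 -> %s\n", name(next_state(USER_STP, 15)));
	return 0;
}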


@@ -39,7 +39,7 @@
 
 /* Number of delay samples for detecting the increase of delay */
 #define HYSTART_MIN_SAMPLES	8
-#define HYSTART_DELAY_MIN	(2U<<3)
+#define HYSTART_DELAY_MIN	(4U<<3)
 #define HYSTART_DELAY_MAX	(16U<<3)
 #define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
@@ -52,6 +52,7 @@ static int tcp_friendliness __read_mostly = 1;
 static int hystart __read_mostly = 1;
 static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
 static int hystart_low_window __read_mostly = 16;
+static int hystart_ack_delta __read_mostly = 2;
 
 static u32 cube_rtt_scale __read_mostly;
 static u32 beta_scale __read_mostly;
@@ -75,6 +76,8 @@ MODULE_PARM_DESC(hystart_detect, "hyrbrid slow start detection mechanisms"
 		 " 1: packet-train 2: delay 3: both packet-train and delay");
 module_param(hystart_low_window, int, 0644);
 MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
+module_param(hystart_ack_delta, int, 0644);
+MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (msecs)");
 
 /* BIC TCP Parameters */
 struct bictcp {
@@ -85,7 +88,7 @@ struct bictcp {
 	u32	last_time;	/* time when updated last_cwnd */
 	u32	bic_origin_point;/* origin point of bic function */
 	u32	bic_K;		/* time to origin point from the beginning of the current epoch */
-	u32	delay_min;	/* min delay */
+	u32	delay_min;	/* min delay (msec << 3) */
 	u32	epoch_start;	/* beginning of an epoch */
 	u32	ack_cnt;	/* number of acks */
 	u32	tcp_cwnd;	/* estimated tcp cwnd */
@@ -95,7 +98,7 @@ struct bictcp {
 	u8	found;		/* the exit point is found? */
 	u32	round_start;	/* beginning of each round */
 	u32	end_seq;	/* end_seq of the round */
-	u32	last_jiffies;	/* last time when the ACK spacing is close */
+	u32	last_ack;	/* last time when the ACK spacing is close */
 	u32	curr_rtt;	/* the minimum rtt of current round */
 };
@@ -116,12 +119,21 @@ static inline void bictcp_reset(struct bictcp *ca)
 	ca->found = 0;
 }
 
+static inline u32 bictcp_clock(void)
+{
+#if HZ < 1000
+	return ktime_to_ms(ktime_get_real());
+#else
+	return jiffies_to_msecs(jiffies);
+#endif
+}
+
 static inline void bictcp_hystart_reset(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	ca->round_start = ca->last_jiffies = jiffies;
+	ca->round_start = ca->last_ack = bictcp_clock();
 	ca->end_seq = tp->snd_nxt;
 	ca->curr_rtt = 0;
 	ca->sample_cnt = 0;
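
bictcp_clock() gives HyStart a timestamp in plain milliseconds. With HZ of 1000 or more a jiffy already is a millisecond and jiffies_to_msecs() is enough; below that, a jiffy is 4 or 10 ms, far too coarse for a 2 ms ack-spacing test, so the real-time clock is used instead. A user-space stand-in for the same selection; clock_gettime() here replaces the kernel's ktime/jiffies interfaces and the HZ macro is faked:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define HZ 100	/* pretend kernel tick rate; try 1000 as well */

static uint32_t wall_clock_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint32_t)((uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

static uint32_t jiffies_clock_ms(void)
{
	/* a jiffies-derived clock only advances in steps of 1000/HZ ms */
	uint32_t ms = wall_clock_ms();

	return ms - ms % (1000 / HZ);
}

static uint32_t bictcp_clock_ms(void)
{
#if HZ < 1000
	return wall_clock_ms();		/* need real millisecond resolution */
#else
	return jiffies_clock_ms();	/* the tick is already fine enough */
#endif
}

int main(void)
{
	printf("jiffies-style clock (HZ=%d): %u ms\n", HZ, jiffies_clock_ms());
	printf("clock used by the sketch:    %u ms\n", bictcp_clock_ms());
	return 0;
}
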
@@ -236,8 +248,8 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	 */
 
 	/* change the unit from HZ to bictcp_HZ */
-	t = ((tcp_time_stamp + (ca->delay_min>>3) - ca->epoch_start)
-	     << BICTCP_HZ) / HZ;
+	t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
+	      - ca->epoch_start) << BICTCP_HZ) / HZ;
 
 	if (t < ca->bic_K)		/* t - K */
 		offs = ca->bic_K - t;
@@ -258,6 +270,13 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 100 * cwnd;              /* very small increment*/
 	}
 
+	/*
+	 * The initial growth of cubic function may be too conservative
+	 * when the available bandwidth is still unknown.
+	 */
+	if (ca->loss_cwnd == 0 && ca->cnt > 20)
+		ca->cnt = 20;   /* increase cwnd 5% per RTT */
+
 	/* TCP Friendly */
 	if (tcp_friendliness) {
 		u32 scale = beta_scale;
@@ -339,12 +358,12 @@ static void hystart_update(struct sock *sk, u32 delay)
 	struct bictcp *ca = inet_csk_ca(sk);
 
 	if (!(ca->found & hystart_detect)) {
-		u32 curr_jiffies = jiffies;
+		u32 now = bictcp_clock();
 
 		/* first detection parameter - ack-train detection */
-		if (curr_jiffies - ca->last_jiffies <= msecs_to_jiffies(2)) {
-			ca->last_jiffies = curr_jiffies;
-			if (curr_jiffies - ca->round_start >= ca->delay_min>>4)
+		if ((s32)(now - ca->last_ack) <= hystart_ack_delta) {
+			ca->last_ack = now;
+			if ((s32)(now - ca->round_start) > ca->delay_min >> 4)
 				ca->found |= HYSTART_ACK_TRAIN;
 		}
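
Since the timestamps now come from a 32-bit millisecond clock, the ack-train test compares them through a signed difference, (s32)(now - ca->last_ack). That is the usual wrap-safe idiom: modular u32 subtraction keeps a small gap small even across the counter wrap, and reading the result as signed tells "slightly later" apart from "slightly earlier" instead of turning the latter into a huge positive number. A minimal stand-alone demonstration:

#include <stdio.h>
#include <stdint.h>

static void check(uint32_t now, uint32_t last_ack, int ack_delta)
{
	int32_t gap = (int32_t)(now - last_ack);	/* wrap-safe elapsed ms */

	printf("now=%10u last_ack=%10u gap=%3d ms -> within %d ms: %s\n",
	       now, last_ack, (int)gap, ack_delta,
	       gap <= ack_delta ? "yes" : "no");
}

int main(void)
{
	int ack_delta = 2;	/* default hystart_ack_delta above */

	check(1001, 1000, ack_delta);		/* 1 ms apart: still a train */
	check(1010, 1000, ack_delta);		/* 10 ms apart: train broken */
	check(1, 0xffffffffu, ack_delta);	/* 2 ms apart, across the wrap */
	check(999, 1000, ack_delta);		/* clock read slightly behind */
	return 0;
}
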
@@ -391,7 +410,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
 		return;
 
-	delay = usecs_to_jiffies(rtt_us) << 3;
+	delay = (rtt_us << 3) / USEC_PER_MSEC;
 	if (delay == 0)
 		delay = 1;
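
Delay samples are now stored in eighths of a millisecond, derived directly from the microsecond RTT, so their meaning no longer depends on HZ; under the old usecs_to_jiffies() form the same field held jiffies << 3, which is also why HYSTART_DELAY_MIN changes from (2U<<3), two jiffies, to (4U<<3), four milliseconds, earlier in this patch. A short worked comparison in plain user-space arithmetic (the old conversion is approximated without the rounding usecs_to_jiffies() does):

#include <stdio.h>

#define USEC_PER_MSEC 1000

/* new encoding: eighths of a millisecond, independent of HZ */
static unsigned int delay_new(unsigned int rtt_us)
{
	return (rtt_us << 3) / USEC_PER_MSEC;
}

/* old encoding: jiffies << 3, so the value depends on the tick rate */
static unsigned int delay_old(unsigned int rtt_us, unsigned int hz)
{
	return (rtt_us / (1000000 / hz)) << 3;
}

int main(void)
{
	unsigned int rtt_us[] = { 500, 3000, 25000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("rtt %6u us -> new %4u/8 ms, old HZ=1000: %4u, old HZ=100: %4u\n",
		       rtt_us[i], delay_new(rtt_us[i]),
		       delay_old(rtt_us[i], 1000), delay_old(rtt_us[i], 100));
	return 0;
}
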
@@ -447,6 +466,10 @@ static int __init cubictcp_register(void)
 	/* divide by bic_scale and by constant Srtt (100ms) */
 	do_div(cube_factor, bic_scale * 10);
 
+	/* hystart needs ms clock resolution */
+	if (hystart && HZ < 1000)
+		cubictcp.flags |= TCP_CONG_RTT_STAMP;
+
 	return tcp_register_congestion_control(&cubictcp);
 }


@@ -3350,7 +3350,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 					 net_invalid_timestamp()))
 				rtt_us = ktime_us_delta(ktime_get_real(),
 							last_ackt);
-			else if (ca_seq_rtt > 0)
+			else if (ca_seq_rtt >= 0)
 				rtt_us = jiffies_to_usecs(ca_seq_rtt);
 		}
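
On a fast LAN a segment is often acknowledged within the same jiffy it was sent, so ca_seq_rtt is 0; with the old '> 0' test such a round trip produced no RTT sample at all (rtt_us stayed at -1), whereas '>= 0' reports it as a zero-microsecond sample the congestion control module can still use (the cubic hunk above then bumps a zero delay up to 1). A tiny sketch of the difference; the helpers only mirror the shape of this branch, with -1 standing for "no sample":

#include <stdio.h>

#define HZ 1000	/* jiffies_to_usecs(j) is j * (1000000 / HZ) */

static long rtt_us_old(long ca_seq_rtt)
{
	return ca_seq_rtt > 0 ? ca_seq_rtt * (1000000 / HZ) : -1;
}

static long rtt_us_new(long ca_seq_rtt)
{
	return ca_seq_rtt >= 0 ? ca_seq_rtt * (1000000 / HZ) : -1;
}

int main(void)
{
	long j;

	for (j = -1; j <= 2; j++)	/* -1: no usable timestamp */
		printf("ca_seq_rtt=%2ld jiffies -> old: %7ld us, new: %7ld us\n",
		       j, rtt_us_old(j), rtt_us_new(j));
	return 0;
}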


@@ -1124,7 +1124,7 @@ restart:
 
 	/* Latch our state.
 
-	   It is tricky place. We need to grab write lock and cannot
+	   It is tricky place. We need to grab our state lock and cannot
 	   drop lock on peer. It is dangerous because deadlock is
 	   possible. Connect to self case and simultaneous
 	   attempt to connect are eliminated by checking socket