net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq

Replace the bh safe variant with the hard irq safe variant.

We need a hard irq safe variant to deal with netpoll transmitting
packets from hard irq context, and we need it in most if not all of
the places using the bh safe variant.

Except on 32bit uni-processor the code is exactly the same, so don't
bother with a bh variant; just have a hard irq safe variant that
everyone can use.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
[javelinanddart]: Merge conflicts were resolved for drivers that exist in 3.4,
and additionally a treewide find and replace was run for these functions.
Signed-off-by: Paul Keith <javelinanddart@gmail.com>
Change-Id: Ib74db793de5e546414a0599f23095f82f0e20c86
Author:    Eric W. Biederman <ebiederm@xmission.com>
Date:      2014-03-13 21:26:42 -07:00
Committer: surblazer
Parent:    a559d1f125
Commit:    ec0a45cd4a
26 changed files with 88 additions and 88 deletions
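
For context, this is the reader/writer pairing the conversion is about, as a minimal sketch (the struct and function names here are illustrative, not from the patch; only the u64_stats_* helpers are the real API). Because netpoll can call a driver's transmit path from hard irq context, the writer below may run in hard irq context, so a reader that only disables bottom halves on 32bit UP could still be interrupted mid-read and observe a torn 64 bit value; the _irq variants close that hole.

#include <linux/u64_stats_sync.h>

/* Hypothetical per-device software counters (names are illustrative). */
struct example_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* Writer: runs in the transmit path, possibly from hard irq context via
 * netpoll. Writers must still be serialized among themselves (e.g. the
 * counters are per-cpu or updated under the tx lock). */
static void example_stats_tx(struct example_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader: the pattern converted throughout this commit. */
static void example_stats_read(const struct example_stats *stats,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		*packets = stats->tx_packets;
		*bytes = stats->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}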

@@ -347,10 +347,10 @@ be_get_ethtool_stats(struct net_device *netdev,
 		struct be_rx_stats *stats = rx_stats(rxo);
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->sync);
+			start = u64_stats_fetch_begin_irq(&stats->sync);
 			data[base] = stats->rx_bytes;
 			data[base + 1] = stats->rx_pkts;
-		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
 		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
 			p = (u8 *)stats + et_rx_stats[i].offset;
@@ -363,19 +363,19 @@ be_get_ethtool_stats(struct net_device *netdev,
 		struct be_tx_stats *stats = tx_stats(txo);
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+			start = u64_stats_fetch_begin_irq(&stats->sync_compl);
 			data[base] = stats->tx_compl;
-		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+		} while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->sync);
+			start = u64_stats_fetch_begin_irq(&stats->sync);
 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
 				p = (u8 *)stats + et_tx_stats[i].offset;
 				data[base + i] =
 					(et_tx_stats[i].size == sizeof(u64)) ?
 						*(u64 *)p : *(u32 *)p;
 			}
-		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
 		base += ETHTOOL_TXSTATS_NUM;
 	}
 }

@@ -445,10 +445,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 	for_all_rx_queues(adapter, rxo, i) {
 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
 		do {
-			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
 			pkts = rx_stats(rxo)->rx_pkts;
 			bytes = rx_stats(rxo)->rx_bytes;
-		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
 		stats->rx_packets += pkts;
 		stats->rx_bytes += bytes;
 		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -459,10 +459,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 	for_all_tx_queues(adapter, txo, i) {
 		const struct be_tx_stats *tx_stats = tx_stats(txo);
 		do {
-			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
 			pkts = tx_stats(txo)->tx_pkts;
 			bytes = tx_stats(txo)->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
 		stats->tx_packets += pkts;
 		stats->tx_bytes += bytes;
 	}
@@ -1086,9 +1086,9 @@ static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
 		return;
 	do {
-		start = u64_stats_fetch_begin_bh(&stats->sync);
+		start = u64_stats_fetch_begin_irq(&stats->sync);
 		pkts = stats->rx_pkts;
-	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
+	} while (u64_stats_fetch_retry_irq(&stats->sync, start));
 	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
 	stats->rx_pkts_prev = pkts;

@@ -2094,15 +2094,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 		ring = adapter->tx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
 			data[i] = ring->tx_stats.packets;
 			data[i+1] = ring->tx_stats.bytes;
 			data[i+2] = ring->tx_stats.restart_queue;
-		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
 			restart2 = ring->tx_stats.restart_queue2;
-		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
 		data[i+2] += restart2;
 		i += IGB_TX_QUEUE_STATS_LEN;
@@ -2110,13 +2110,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 	for (j = 0; j < adapter->num_rx_queues; j++) {
 		ring = adapter->rx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 			data[i] = ring->rx_stats.packets;
 			data[i+1] = ring->rx_stats.bytes;
 			data[i+2] = ring->rx_stats.drops;
 			data[i+3] = ring->rx_stats.csum_err;
 			data[i+4] = ring->rx_stats.alloc_failed;
-		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 		i += IGB_RX_QUEUE_STATS_LEN;
 	}
 	spin_unlock(&adapter->stats64_lock);

@@ -4661,10 +4661,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 		}
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 			_bytes = ring->rx_stats.bytes;
 			_packets = ring->rx_stats.packets;
-		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -4677,10 +4677,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
 			_bytes = ring->tx_stats.bytes;
 			_packets = ring->tx_stats.packets;
-		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}

@@ -1093,10 +1093,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			data[i] = ring->stats.packets;
 			data[i+1] = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		i += 2;
 	}
 	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
@@ -1109,10 +1109,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			data[i] = ring->stats.packets;
 			data[i+1] = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		i += 2;
 	}

@@ -6484,10 +6484,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 		if (ring) {
 			do {
-				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
 				packets = ring->stats.packets;
 				bytes = ring->stats.bytes;
-			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 			stats->rx_packets += packets;
 			stats->rx_bytes += bytes;
 		}
@@ -6500,10 +6500,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 		if (ring) {
 			do {
-				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
 				packets = ring->stats.packets;
 				bytes = ring->stats.bytes;
-			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 			stats->tx_packets += packets;
 			stats->tx_bytes += bytes;
 		}

@@ -3231,10 +3231,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		ring = &adapter->rx_ring[i];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			bytes = ring->total_bytes;
 			packets = ring->total_packets;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		stats->rx_bytes += bytes;
 		stats->rx_packets += packets;
 	}
@@ -3242,10 +3242,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		ring = &adapter->tx_ring[i];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			bytes = ring->total_bytes;
 			packets = ring->total_packets;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		stats->tx_bytes += bytes;
 		stats->tx_packets += packets;
 	}

@@ -3892,19 +3892,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
 	u64 _bytes, _packets;
 	do {
-		start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
 		_bytes = sky2->rx_stats.bytes;
 		_packets = sky2->rx_stats.packets;
-	} while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
 	stats->rx_packets = _packets;
 	stats->rx_bytes = _bytes;
 	do {
-		start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
 		_bytes = sky2->tx_stats.bytes;
 		_packets = sky2->tx_stats.packets;
-	} while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
 	stats->tx_packets = _packets;
 	stats->tx_bytes = _bytes;

@@ -1755,19 +1755,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	/* software stats */
 	do {
-		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
 		storage->rx_packets = np->stat_rx_packets;
 		storage->rx_bytes = np->stat_rx_bytes;
 		storage->rx_dropped = np->stat_rx_dropped;
 		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
 	do {
-		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
 		storage->tx_packets = np->stat_tx_packets;
 		storage->tx_bytes = np->stat_tx_bytes;
 		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {

@@ -2550,16 +2550,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	netdev_stats_to_stats64(stats, &dev->stats);
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
 		stats->rx_packets = tp->rx_stats.packets;
 		stats->rx_bytes = tp->rx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
 		stats->tx_packets = tp->tx_stats.packets;
 		stats->tx_bytes = tp->tx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
 	return stats;
 }

@@ -5790,17 +5790,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		rtl8169_rx_missed(dev, ioaddr);
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
 		stats->rx_packets = tp->rx_stats.packets;
 		stats->rx_bytes = tp->rx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
 		stats->tx_packets = tp->tx_stats.packets;
 		stats->tx_bytes = tp->tx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;

@@ -136,18 +136,18 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
 	unsigned int start;
 	do {
-		start = u64_stats_fetch_begin_bh(&dp->rsync);
+		start = u64_stats_fetch_begin_irq(&dp->rsync);
 		stats->rx_packets = dp->rx_packets;
 		stats->rx_bytes = dp->rx_bytes;
-	} while (u64_stats_fetch_retry_bh(&dp->rsync, start));
+	} while (u64_stats_fetch_retry_irq(&dp->rsync, start));
 	do {
-		start = u64_stats_fetch_begin_bh(&dp->tsync);
+		start = u64_stats_fetch_begin_irq(&dp->tsync);
 		stats->tx_packets = dp->tx_packets;
 		stats->tx_bytes = dp->tx_bytes;
-	} while (u64_stats_fetch_retry_bh(&dp->tsync, start));
+	} while (u64_stats_fetch_retry_irq(&dp->tsync, start));
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;

@@ -505,13 +505,13 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
 		for_each_possible_cpu(i) {
 			p = per_cpu_ptr(vlan->pcpu_stats, i);
 			do {
-				start = u64_stats_fetch_begin_bh(&p->syncp);
+				start = u64_stats_fetch_begin_irq(&p->syncp);
 				rx_packets = p->rx_packets;
 				rx_bytes = p->rx_bytes;
 				rx_multicast = p->rx_multicast;
 				tx_packets = p->tx_packets;
 				tx_bytes = p->tx_bytes;
-			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 			stats->rx_packets += rx_packets;
 			stats->rx_bytes += rx_bytes;

@@ -931,13 +931,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	for_each_possible_cpu(i) {
 		p = per_cpu_ptr(team->pcpu_stats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			rx_packets = p->rx_packets;
 			rx_bytes = p->rx_bytes;
 			rx_multicast = p->rx_multicast;
 			tx_packets = p->tx_packets;
 			tx_bytes = p->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes += rx_bytes;

@@ -171,13 +171,13 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
 		unsigned int start;
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->syncp);
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			rx_packets = stats->rx_packets;
 			tx_packets = stats->tx_packets;
 			rx_bytes = stats->rx_bytes;
 			tx_bytes = stats->tx_bytes;
 			rx_dropped = stats->rx_dropped;
-		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;
 		tot->rx_bytes += rx_bytes;

@@ -1076,13 +1076,13 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 		unsigned int start;
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->syncp);
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			rx_packets = stats->rx_packets;
 			tx_packets = stats->tx_packets;
 			rx_bytes = stats->rx_bytes;
 			tx_bytes = stats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;

@@ -27,8 +27,8 @@
  * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  * read partial values)
  *
- * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
- *    u64_stats_fetch_retry_bh() helpers
+ * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ *    u64_stats_fetch_retry_irq() helpers
  *
  * Usage :
  *
@@ -114,31 +114,31 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 }
 /*
- * In case softirq handlers can update u64 counters, readers can use following helpers
+ * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable BH.
+ * - UP 32bit must disable irqs.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
 #if BITS_PER_LONG==32
-	local_bh_disable();
+	local_irq_disable();
 #endif
 	return 0;
 #endif
 }
-static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					    unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 #if BITS_PER_LONG==32
-	local_bh_enable();
+	local_irq_enable();
 #endif
 	return false;
 #endif
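
The most common caller pattern converted in this commit folds per-cpu counters into a device total; here is a minimal sketch of that usage with the renamed helpers (the pcpu_counters type and field names are illustrative, not from the patch):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-cpu counter block, in the style of the vlan/macvlan/
 * team/veth callers converted by this commit. */
struct pcpu_counters {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void fold_pcpu_counters(struct pcpu_counters __percpu *pcpu,
			       u64 *rx_packets, u64 *rx_bytes)
{
	int cpu;

	*rx_packets = 0;
	*rx_bytes = 0;

	for_each_possible_cpu(cpu) {
		const struct pcpu_counters *p = per_cpu_ptr(pcpu, cpu);
		u64 packets, bytes;
		unsigned int start;

		/* Loop until a consistent snapshot is read; this never
		 * loops on 64bit and on 32bit UP (irqs are disabled). */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->rx_packets;
			bytes = p->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		*rx_packets += packets;
		*rx_bytes += bytes;
	}
}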

@@ -674,13 +674,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
 			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 			do {
-				start = u64_stats_fetch_begin_bh(&p->syncp);
+				start = u64_stats_fetch_begin_irq(&p->syncp);
 				rxpackets = p->rx_packets;
 				rxbytes = p->rx_bytes;
 				rxmulticast = p->rx_multicast;
 				txpackets = p->tx_packets;
 				txbytes = p->tx_bytes;
-			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 			stats->rx_packets += rxpackets;
 			stats->rx_bytes += rxbytes;

@@ -1504,9 +1504,9 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 		bhptr = per_cpu_ptr(mib[0], cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
-			start = u64_stats_fetch_begin_bh(syncp);
+			start = u64_stats_fetch_begin_irq(syncp);
 			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_bh(syncp, start));
+		} while (u64_stats_fetch_retry_irq(syncp, start));
 		res += v;
 	}

@@ -187,12 +187,12 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
 		unsigned int start;
 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			rx_packets = tstats->rx_packets;
 			tx_packets = tstats->tx_packets;
 			rx_bytes = tstats->rx_bytes;
 			tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;

@@ -162,12 +162,12 @@ static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
 		unsigned int start;
 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			rx_packets = tstats->rx_packets;
 			tx_packets = tstats->tx_packets;
 			rx_bytes = tstats->rx_bytes;
 			tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;

@@ -106,12 +106,12 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			tmp.rx_packets = tstats->rx_packets;
 			tmp.rx_bytes = tstats->rx_bytes;
 			tmp.tx_packets = tstats->tx_packets;
 			tmp.tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 		sum.rx_packets += tmp.rx_packets;
 		sum.rx_bytes += tmp.rx_bytes;

@@ -105,12 +105,12 @@ static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
 		unsigned int start;
 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			rx_packets = tstats->rx_packets;
 			tx_packets = tstats->tx_packets;
 			rx_bytes = tstats->rx_bytes;
 			tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;

@@ -2143,10 +2143,10 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
 		__u64 inbytes, outbytes;
 		do {
-			start = u64_stats_fetch_begin_bh(&u->syncp);
+			start = u64_stats_fetch_begin_irq(&u->syncp);
 			inbytes = u->ustats.inbytes;
 			outbytes = u->ustats.outbytes;
-		} while (u64_stats_fetch_retry_bh(&u->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&u->syncp, start));
 		seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
 			   i, u->ustats.conns, u->ustats.inpkts,

@@ -722,9 +722,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+			start = u64_stats_fetch_begin_irq(&percpu_stats->sync);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->sync, start));
 		stats->n_hit += local_stats.n_hit;
 		stats->n_missed += local_stats.n_missed;

@@ -273,9 +273,9 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+			start = u64_stats_fetch_begin_irq(&percpu_stats->sync);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->sync, start));
 		stats->rx_bytes += local_stats.rx_bytes;
 		stats->rx_packets += local_stats.rx_packets;