mirror of https://github.com/followmsi/android_kernel_google_msm.git
net: Explicitly initialize u64_stats_sync structures for lockdep
In order to enable lockdep on seqcount/seqlock structures, we must explicitly initialize any locks.

The u64_stats_sync structure uses a seqcount, so we need to introduce a u64_stats_init() function and use it to initialize the structure.

This unfortunately adds a lot of fairly trivial initialization code to a number of drivers, but the benefit of ensuring correctness makes this worthwhile.

Because these changes are required for lockdep to be enabled, and the changes are quite trivial, I've not yet split this patch out into 30-some separate patches, as I figured it would be better to get the various maintainers' thoughts on how to best merge this change along with the seqcount lockdep enablement.

Feedback would be appreciated!

Signed-off-by: John Stultz <john.stultz@linaro.org>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
Cc: James Morris <jmorris@namei.org>
Cc: Jesse Gross <jesse@nicira.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Mirko Lindner <mlindner@marvell.com>
Cc: Patrick McHardy <kaber@trash.net>
Cc: Roger Luethi <rl@hellgate.ch>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Simon Horman <horms@verge.net.au>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Wensong Zhang <wensong@linux-vs.org>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/1381186321-4906-2-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Change-Id: Ieda06b95f0ec302dbef8576ef4c8fb4bd28ffb2f
parent 39b326bec0
commit ad74346428
28 changed files with 200 additions and 4 deletions
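Nearly every driver hunk below is an instance of one pattern: after alloc_percpu(), walk each possible CPU and run u64_stats_init() on the embedded u64_stats_sync before any stats update can happen. A minimal sketch of that pattern, with hypothetical foo_* names that do not appear anywhere in this diff:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU stats block, mirroring pcpu_dstats/pcpu_lstats below. */
struct foo_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static struct foo_pcpu_stats __percpu *foo_stats;

static int foo_dev_init(struct net_device *dev)
{
	int i;

	foo_stats = alloc_percpu(struct foo_pcpu_stats);
	if (!foo_stats)
		return -ENOMEM;

	/* Must run before the first u64_stats_update_begin(): on 32-bit SMP
	 * this is the seqcount_init() that registers the lock with lockdep. */
	for_each_possible_cpu(i) {
		struct foo_pcpu_stats *stats = per_cpu_ptr(foo_stats, i);

		u64_stats_init(&stats->syncp);
	}
	return 0;
}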
@@ -100,10 +100,16 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
+	int i;
 	dev->dstats = alloc_percpu(struct pcpu_dstats);
 	if (!dev->dstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_dstats *dstats;
+		dstats = per_cpu_ptr(dev->dstats, i);
+		u64_stats_init(&dstats->syncp);
+	}
 	return 0;
 }
 
@@ -1823,6 +1823,9 @@ static int be_tx_cqs_create(struct be_adapter *adapter)
 		if (status)
 			return status;
 
+		u64_stats_init(&txo->stats.sync);
+		u64_stats_init(&txo->stats.sync_compl);
+
 		/* If num_evt_qs is less than num_tx_qs, then more than
 		 * one txq share an eq
 		 */
@@ -1888,6 +1891,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
 		if (rc)
 			return rc;
 
+		u64_stats_init(&rxo->stats.sync);
 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
 		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
 		if (rc)

@@ -728,6 +728,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
 			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+		u64_stats_init(&ring->tx_syncp);
+		u64_stats_init(&ring->tx_syncp2);
 		adapter->tx_ring[i] = ring;
 	}
 	/* Restore the adapter's original node */
@@ -759,6 +761,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		if (adapter->hw.mac.type == e1000_i350)
 			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
 
+		u64_stats_init(&ring->rx_syncp);
 		adapter->rx_ring[i] = ring;
 	}
 	/* Restore the adapter's original node */

@@ -4497,6 +4497,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
+	u64_stats_init(&tx_ring->syncp);
+
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -4573,6 +4575,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
+	u64_stats_init(&rx_ring->syncp);
+
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);

@@ -4750,6 +4750,9 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->hw = hw;
 	sky2->msg_enable = netif_msg_init(debug, default_msg);
 
+	u64_stats_init(&sky2->tx_stats.syncp);
+	u64_stats_init(&sky2->rx_stats.syncp);
+
 	/* Auto speed and flow control */
 	sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
 	if (hw->chip_id != CHIP_ID_YUKON_XL)

@@ -2073,6 +2073,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 					vdev->config.tx_steering_type;
 			vpath->fifo.ndev = vdev->ndev;
 			vpath->fifo.pdev = vdev->pdev;
+
+			u64_stats_init(&vpath->fifo.stats.syncp);
+			u64_stats_init(&vpath->ring.stats.syncp);
+
 			if (vdev->config.tx_steering_type)
 				vpath->fifo.txq =
 					netdev_get_tx_queue(vdev->ndev, i);

@@ -5544,6 +5544,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	spin_lock_init(&np->lock);
 	spin_lock_init(&np->hwstats_lock);
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
+	u64_stats_init(&np->swstats_rx_syncp);
+	u64_stats_init(&np->swstats_tx_syncp);
 
 	init_timer(&np->oom_kick);
 	np->oom_kick.data = (unsigned long) dev;

@@ -828,6 +828,9 @@ retry:
 	/* enable PCI bus-mastering */
 	pci_set_master (pdev);
 
+	u64_stats_init(&tp->rx_stats.syncp);
+	u64_stats_init(&tp->tx_stats.syncp);
+
 	if (use_io) {
 		ioaddr = pci_iomap(pdev, 0, 0);
 		if (!ioaddr) {

@@ -1093,6 +1093,8 @@ static void tile_net_register(void *dev_ptr)
 	info->egress_timer.data = (long)info;
 	info->egress_timer.function = tile_net_handle_egress_timer;
 
+	u64_stats_init(&info->stats.syncp);
+
 	priv->cpu[my_cpu] = info;
 
 	/*

@@ -975,6 +975,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	dev->base_addr = (unsigned long)ioaddr;
 	rp->base = ioaddr;
 
+	u64_stats_init(&rp->tx_stats.syncp);
+	u64_stats_init(&rp->rx_stats.syncp);
+
 	/* Get chip registers into a sane state */
 	rhine_power_init(dev);
 	rhine_hw_init(dev, pioaddr);

@@ -264,6 +264,7 @@ MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 static int __init ifb_init_one(int index)
 {
 	struct net_device *dev_ifb;
+	struct ifb_private *dp;
 	int err;
 
 	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
@@ -272,6 +273,10 @@ static int __init ifb_init_one(int index)
 	if (!dev_ifb)
 		return -ENOMEM;
 
+	dp = netdev_priv(dev_ifb);
+	u64_stats_init(&dp->rsync);
+	u64_stats_init(&dp->tsync);
+
 	dev_ifb->rtnl_link_ops = &ifb_link_ops;
 	err = register_netdevice(dev_ifb);
 	if (err < 0)

@@ -137,10 +137,16 @@ static const struct ethtool_ops loopback_ethtool_ops = {
 
 static int loopback_dev_init(struct net_device *dev)
 {
+	int i;
 	dev->lstats = alloc_percpu(struct pcpu_lstats);
 	if (!dev->lstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_lstats *lb_stats;
+		lb_stats = per_cpu_ptr(dev->lstats, i);
+		u64_stats_init(&lb_stats->syncp);
+	}
 	return 0;
 }
 
@@ -452,6 +452,7 @@ static int macvlan_init(struct net_device *dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	const struct net_device *lowerdev = vlan->lowerdev;
+	int i;
 
 	dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
 		     (lowerdev->state & MACVLAN_STATE_MASK);
@@ -468,6 +469,12 @@ static int macvlan_init(struct net_device *dev)
 	if (!vlan->pcpu_stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct macvlan_pcpu_stats *mvlstats;
+		mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
+		u64_stats_init(&mvlstats->syncp);
+	}
+
 	return 0;
 }
 
@@ -747,6 +747,12 @@ static int team_init(struct net_device *dev)
 	if (!team->pcpu_stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct team_pcpu_stats *team_stats;
+		team_stats = per_cpu_ptr(team->pcpu_stats, i);
+		u64_stats_init(&team_stats->syncp);
+	}
+
 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
 		INIT_HLIST_HEAD(&team->port_hlist[i]);
 	INIT_LIST_HEAD(&team->port_list);

@@ -230,6 +230,7 @@ static int veth_dev_init(struct net_device *dev)
 {
 	struct veth_net_stats __percpu *stats;
 	struct veth_priv *priv;
+	int i;
 
 	stats = alloc_percpu(struct veth_net_stats);
 	if (stats == NULL)
@@ -237,6 +238,13 @@ static int veth_dev_init(struct net_device *dev)
 
 	priv = netdev_priv(dev);
 	priv->stats = stats;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_vstats *veth_stats;
+		veth_stats = per_cpu_ptr(dev->vstats, i);
+		u64_stats_init(&veth_stats->syncp);
+	}
+
 	return 0;
 }
 
@@ -1077,6 +1077,13 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->stats == NULL)
 		goto free;
 
+	for_each_possible_cpu(i) {
+		struct virtnet_stats *virtnet_stats;
+		virtnet_stats = per_cpu_ptr(vi->stats, i);
+		u64_stats_init(&virtnet_stats->tx_syncp);
+		u64_stats_init(&virtnet_stats->rx_syncp);
+	}
+
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
 	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

@@ -1314,6 +1314,12 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
 	if (np->stats == NULL)
 		goto exit;
 
+	for_each_possible_cpu(i) {
+		struct netfront_stats *xen_nf_stats;
+		xen_nf_stats = per_cpu_ptr(np->stats, i);
+		u64_stats_init(&xen_nf_stats->syncp);
+	}
+
 	/* Initialise tx_skbs as a free chain containing every entry. */
 	np->tx_skb_freelist = 0;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {

@@ -67,6 +67,13 @@ struct u64_stats_sync {
 #endif
 };
 
+
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+# define u64_stats_init(syncp)	seqcount_init(syncp.seq)
+#else
+# define u64_stats_init(syncp)	do { } while (0)
+#endif
+
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)

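On 64-bit kernels (and 32-bit UP) struct u64_stats_sync is empty, so u64_stats_init() compiles to nothing; only on 32-bit SMP does it expand to seqcount_init(), which is what hands lockdep a lock class for the embedded seqcount. Note the unparenthesized syncp in the macro body: callers pass &stats->syncp, and textual expansion of syncp.seq yields &stats->syncp.seq, a pointer to the seq member. A hedged sketch of how an initialized syncp is used afterwards, reusing the hypothetical foo_pcpu_stats from the earlier sketch (not code from this diff):

/* Writer side: bump 64-bit counters inside the write seqcount section. */
static void foo_rx_account(struct foo_pcpu_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side: retry until a consistent snapshot is observed. */
static u64 foo_rx_bytes(const struct foo_pcpu_stats *stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return bytes;
}

Without the new u64_stats_init() call, the seqcount inside syncp would reach u64_stats_update_begin() with no lockdep class registered, which is exactly what the seqcount lockdep enablement would flag.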
@@ -550,7 +550,7 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-	int subclass = 0;
+	int subclass = 0, i;
 
 	netif_carrier_off(dev);
 
@@ -602,6 +602,13 @@ static int vlan_dev_init(struct net_device *dev)
 	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct vlan_pcpu_stats *vlan_stat;
+		vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+		u64_stats_init(&vlan_stat->syncp);
+	}
+
 
 	return 0;
 }
 
@@ -80,11 +80,18 @@ out:
 static int br_dev_init(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
+	int i;
 
 	br->stats = alloc_percpu(struct br_cpu_netstats);
 	if (!br->stats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct br_cpu_netstats *br_dev_stats;
+		br_dev_stats = per_cpu_ptr(br->stats, i);
+		u64_stats_init(&br_dev_stats->syncp);
+	}
+
 	return 0;
 }
 
@@ -1521,6 +1521,7 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
 	ptr[0] = __alloc_percpu(mibsize, align);
 	if (!ptr[0])
 		return -ENOMEM;
+
 #if SNMP_ARRAY_SZ == 2
 	ptr[1] = __alloc_percpu(mibsize, align);
 	if (!ptr[1]) {
@@ -1581,6 +1582,8 @@ static const struct net_protocol icmp_protocol = {
 
 static __net_init int ipv4_mib_init_net(struct net *net)
 {
+	int i;
+
 	if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
 			  sizeof(struct tcp_mib),
 			  __alignof__(struct tcp_mib)) < 0)
@@ -1589,6 +1592,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip_mib;
+
+	for_each_possible_cpu(i) {
+		struct ipstats_mib *af_inet_stats;
+		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
+		u64_stats_init(&af_inet_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
+		u64_stats_init(&af_inet_stats->syncp);
+#endif
+	}
+
 	if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
 			  sizeof(struct linux_mib),
 			  __alignof__(struct linux_mib)) < 0)

@@ -290,10 +290,24 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
 static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
+	int i;
+
 	if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip;
 
+	for_each_possible_cpu(i) {
+		struct ipstats_mib *addrconf_stats;
+		addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+		u64_stats_init(&addrconf_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+		addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
+		u64_stats_init(&addrconf_stats->syncp);
+#endif
+	}
+
+
 	idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
 					GFP_KERNEL);
 	if (!idev->stats.icmpv6dev)

@@ -1018,6 +1018,8 @@ static void ipv6_packet_cleanup(void)
 
 static int __net_init ipv6_init_mibs(struct net *net)
 {
+	int i;
+
 	if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
 			  sizeof(struct udp_mib),
 			  __alignof__(struct udp_mib)) < 0)
@@ -1030,6 +1032,18 @@ static int __net_init ipv6_init_mibs(struct net *net)
 			  sizeof(struct ipstats_mib),
 			  __alignof__(struct ipstats_mib)) < 0)
 		goto err_ip_mib;
+
+	for_each_possible_cpu(i) {
+		struct ipstats_mib *af_inet6_stats;
+		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+		u64_stats_init(&af_inet6_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+		af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
+		u64_stats_init(&af_inet6_stats->syncp);
+#endif
+	}
+
+
 	if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
 			  sizeof(struct icmpv6_mib),
 			  __alignof__(struct icmpv6_mib)) < 0)

@@ -1408,11 +1408,18 @@ static inline int
 ip6_tnl_dev_init_gen(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
+	int i;
 
 	t->dev = dev;
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ip6_tnl_stats;
+		ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ip6_tnl_stats->syncp);
+	}
 	return 0;
 }
 
@@ -1170,6 +1170,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 static int ipip6_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
+	int i;
 
 	tunnel->dev = dev;
 
@@ -1181,6 +1182,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ipip6_tunnel_stats;
+		ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ipip6_tunnel_stats->syncp);
+	}
+
 	return 0;
 }
 
@@ -1190,6 +1197,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 	struct iphdr *iph = &tunnel->parms.iph;
 	struct net *net = dev_net(dev);
 	struct sit_net *sitn = net_generic(net, sit_net_id);
+	int i;
 
 	tunnel->dev = dev;
 	strcpy(tunnel->parms.name, dev->name);
@@ -1202,6 +1210,13 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *ipip6_fb_stats;
+		ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ipip6_fb_stats->syncp);
+	}
+
 	dev_hold(dev);
 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
 	return 0;

@@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 	       struct ip_vs_dest **dest_p)
 {
 	struct ip_vs_dest *dest;
-	unsigned atype;
+	unsigned int atype, i;
 
 	EnterFunction(2);
 
@@ -869,6 +869,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 	if (!dest->stats.cpustats)
 		goto err_alloc;
 
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *ip_vs_dest_stats;
+		ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i);
+		u64_stats_init(&ip_vs_dest_stats->syncp);
+	}
+
 	dest->af = svc->af;
 	dest->protocol = svc->protocol;
 	dest->vaddr = svc->addr;
@@ -1130,7 +1136,7 @@ static int
 ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		  struct ip_vs_service **svc_p)
 {
-	int ret = 0;
+	int ret = 0, i;
 	struct ip_vs_scheduler *sched = NULL;
 	struct ip_vs_pe *pe = NULL;
 	struct ip_vs_service *svc = NULL;
@@ -1174,6 +1180,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	if (!svc->stats.cpustats)
 		goto out_err;
 
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *ip_vs_stats;
+		ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i);
+		u64_stats_init(&ip_vs_stats->syncp);
+	}
+
 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 0);
 	atomic_set(&svc->refcnt, 0);
@@ -3625,7 +3637,7 @@ static void ip_vs_genl_unregister(void)
 #ifdef CONFIG_SYSCTL
 int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
-	int idx;
+	int i, idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ctl_table *tbl;
 
@@ -3725,6 +3737,12 @@ int __net_init ip_vs_control_net_init(struct net *net)
 	if (!ipvs->tot_stats.cpustats)
 		return -ENOMEM;
 
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *ipvs_tot_stats;
+		ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i);
+		u64_stats_init(&ipvs_tot_stats->syncp);
+	}
+
 	spin_lock_init(&ipvs->tot_stats.lock);
 
 	proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);

@@ -1261,6 +1261,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err_destroy_table;
 	}
 
+	for_each_possible_cpu(i) {
+		struct dp_stats_percpu *dpath_stats;
+		dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
+		u64_stats_init(&dpath_stats->sync);
+	}
+
 	/* Set up our datapath device. */
 	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
 	parms.type = OVS_VPORT_TYPE_INTERNAL;

@@ -109,6 +109,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 {
 	struct vport *vport;
 	size_t alloc_size;
+	int i;
 
 	alloc_size = sizeof(struct vport);
 	if (priv_size) {
@@ -131,6 +132,13 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *vport_stats;
+		vport_stats = per_cpu_ptr(vport->percpu_stats, i);
+		u64_stats_init(&vport_stats->syncp);
+	}
+
 
 	spin_lock_init(&vport->stats_lock);
 
 	return vport;