Mirror of https://github.com/followmsi/android_kernel_google_msm.git

net: usb: Using workqueue as bottom half handler

The usbnet driver uses a tasklet as its bottom half handler. Because
the tasklet runs in interrupt context, at high throughput the driver
spends so long in interrupt context processing rx buffers that it
triggers a watchdog reset. Replace the tasklet with a workqueue so the
bottom half runs in process context and the watchdog reset is avoided.

CRs-Fixed: 378526
Change-Id: I8eac339e37d734dbaaf7a2e874d3974f562e8680
Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
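
For readers unfamiliar with the conversion, the sketch below shows the general tasklet-to-workqueue pattern this patch applies. It is not the driver code itself: the demo_dev/demo_wq/demo_bh_w names are hypothetical, and only the shape of the change (a work item instead of a tasklet, a dedicated singlethread workqueue, queue_work()/cancel_work_sync() instead of tasklet_schedule()/tasklet_kill()) mirrors the diff below.

/* Sketch only: hypothetical demo_* names, not the usbnet code. */
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct bh_w;	/* was: struct tasklet_struct bh */
};

static struct workqueue_struct *demo_wq;
static struct demo_dev demo;

/* Bottom half body: now runs in process context, so long-running rx
 * processing no longer monopolises softirq/interrupt context. */
static void demo_bh_w(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, bh_w);

	pr_debug("processing rx buffers for %p\n", dev);
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo.bh_w, demo_bh_w);
	queue_work(demo_wq, &demo.bh_w);	/* was: tasklet_schedule() */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo.bh_w);	/* was: tasklet_kill() */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the work item runs in process context rather than softirq context, the handler is preemptible and may sleep; that is also why the patch switches usbnet_skb_return() from netif_rx() to netif_rx_ni(), the variant intended for callers in process context.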
Authored by Hemant Kumar on 2012-07-02 13:39:22 -07:00, committed by Stephen Boyd
parent 8fc47f5dd1
commit 1212bea9dd
2 changed files with 32 additions and 15 deletions
drivers/net/usb
include/linux/usb

drivers/net/usb

@@ -86,6 +86,8 @@ static u8 node_id [ETH_ALEN];
 
 static const char driver_name [] = "usbnet";
 
+static struct workqueue_struct *usbnet_wq;
+
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
 module_param (msg_level, int, 0);
@@ -245,7 +247,7 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 	if (skb_defer_rx_timestamp(skb))
 		return;
 
-	status = netif_rx (skb);
+	status = netif_rx_ni(skb);
 	if (status != NET_RX_SUCCESS)
 		netif_dbg(dev, rx_err, dev->net,
 			  "netif_rx status %d\n", status);
@@ -315,7 +317,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
 	spin_lock(&dev->done.lock);
 	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
-		tasklet_schedule(&dev->bh);
+		queue_work(usbnet_wq, &dev->bh_w);
 	spin_unlock_irqrestore(&dev->done.lock, flags);
 	return old_state;
 }
@@ -386,7 +388,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		default:
 			netif_dbg(dev, rx_err, dev->net,
 				  "rx submit, %d\n", retval);
-			tasklet_schedule (&dev->bh);
+			queue_work(usbnet_wq, &dev->bh_w);
 			break;
 		case 0:
 			usb_mark_last_busy(dev->udev);
@@ -579,7 +581,7 @@ void usbnet_resume_rx(struct usbnet *dev)
 		num++;
 	}
 
-	tasklet_schedule(&dev->bh);
+	queue_work(usbnet_wq, &dev->bh_w);
 
 	netif_dbg(dev, rx_status, dev->net,
 		  "paused rx queue disabled, %d skbs requeued\n", num);
@@ -648,7 +650,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
 {
 	if (netif_running(dev->net)) {
 		(void) unlink_urbs (dev, &dev->rxq);
-		tasklet_schedule(&dev->bh);
+		queue_work(usbnet_wq, &dev->bh_w);
 	}
 }
 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -722,7 +724,7 @@ int usbnet_stop (struct net_device *net)
 	 */
 	dev->flags = 0;
 	del_timer_sync (&dev->delay);
-	tasklet_kill (&dev->bh);
+	cancel_work_sync(&dev->bh_w);
 	if (info->manage_power)
 		info->manage_power(dev, 0);
 	else
@@ -795,7 +797,7 @@ int usbnet_open (struct net_device *net)
 		   "simple");
 
 	// delay posting reads until we're fully open
-	tasklet_schedule (&dev->bh);
+	queue_work(usbnet_wq, &dev->bh_w);
 	if (info->manage_power) {
 		retval = info->manage_power(dev, 1);
 		if (retval < 0)
@@ -965,7 +967,7 @@ fail_halt:
 					   status);
 		} else {
 			clear_bit (EVENT_RX_HALT, &dev->flags);
-			tasklet_schedule (&dev->bh);
+			queue_work(usbnet_wq, &dev->bh_w);
 		}
 	}
 
@@ -990,7 +992,7 @@ fail_halt:
 			usb_autopm_put_interface(dev->intf);
 fail_lowmem:
 			if (resched)
-				tasklet_schedule (&dev->bh);
+				queue_work(usbnet_wq, &dev->bh_w);
 		}
 	}
 
@@ -1076,7 +1078,7 @@ void usbnet_tx_timeout (struct net_device *net)
 	struct usbnet *dev = netdev_priv(net);
 
 	unlink_urbs (dev, &dev->txq);
-	tasklet_schedule (&dev->bh);
+	queue_work(usbnet_wq, &dev->bh_w);
 
 	// FIXME: device recovery -- reset?
 }
@@ -1263,13 +1265,21 @@ static void usbnet_bh (unsigned long param)
 					  "rxqlen %d --> %d\n",
 					  temp, dev->rxq.qlen);
 			if (dev->rxq.qlen < qlen)
-				tasklet_schedule (&dev->bh);
+				queue_work(usbnet_wq, &dev->bh_w);
 		}
 		if (dev->txq.qlen < TX_QLEN (dev))
 			netif_wake_queue (dev->net);
 	}
 }
+
+static void usbnet_bh_w(struct work_struct *work)
+{
+	struct usbnet *dev =
+		container_of(work, struct usbnet, bh_w);
+	unsigned long param = (unsigned long)dev;
+	usbnet_bh(param);
+}
 
 /*-------------------------------------------------------------------------
  *
@@ -1388,8 +1398,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
 	skb_queue_head_init(&dev->rxq_pause);
-	dev->bh.func = usbnet_bh;
-	dev->bh.data = (unsigned long) dev;
+	INIT_WORK(&dev->bh_w, usbnet_bh_w);
 	INIT_WORK (&dev->kevent, kevent);
 	init_usb_anchor(&dev->deferred);
 	dev->delay.function = usbnet_bh;
@@ -1573,7 +1582,7 @@ int usbnet_resume (struct usb_interface *intf)
 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
 				netif_tx_wake_all_queues(dev->net);
-			tasklet_schedule (&dev->bh);
+			queue_work(usbnet_wq, &dev->bh_w);
 		}
 	}
 	return 0;
@@ -1590,12 +1599,20 @@ static int __init usbnet_init(void)
 		FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
 
 	random_ether_addr(node_id);
+
+	usbnet_wq = create_singlethread_workqueue("usbnet");
+	if (!usbnet_wq) {
+		pr_err("%s: Unable to create workqueue:usbnet\n", __func__);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 module_init(usbnet_init);
 
 static void __exit usbnet_exit(void)
 {
+	destroy_workqueue(usbnet_wq);
 }
 module_exit(usbnet_exit);

include/linux/usb

@@ -56,7 +56,7 @@ struct usbnet {
 	struct sk_buff_head rxq_pause;
 	struct urb *interrupt;
 	struct usb_anchor deferred;
-	struct tasklet_struct bh;
+	struct work_struct bh_w;
 
 	struct work_struct kevent;
 	unsigned long flags;