net: sched: Schedule PRIO qdisc when flow control released

The PRIO qdisc supports flow control: packet dequeue can be
disabled via the boolean flag 'enable_flow'.  When flow is
re-enabled, packets arriving at the network driver see high
latency.  To reduce this scheduling delay, the qdisc now
invokes __netif_schedule() to expedite dequeue, which
significantly reduces the latency of packets reaching the
network driver.

Change-Id: Ic5fe3faf86f177300d3018b9f60974ba3811641c
CRs-Fixed: 355156
Acked-by: Jimi Shah <jimis@qualcomm.com>
Signed-off-by: Tianyi Gou <tgou@codeaurora.org>
Author: Tianyi Gou <tgou@codeaurora.org>
Date: 2012-06-26 10:11:05 -06:00
Committer: Stephen Boyd
Commit: 57435786e2
Parent: b01d4acd6f

1 file changed, 13 insertions(+), 1 deletion(-)

--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -176,6 +177,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt *qopt;
 	int i;
+	int flow_change = 0;

 	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
@@ -190,7 +192,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	}

 	sch_tree_lock(sch);
-	q->enable_flow = qopt->enable_flow;
+	if (q->enable_flow != qopt->enable_flow) {
+		q->enable_flow = qopt->enable_flow;
+		flow_change = 1;
+	}
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
@@ -225,6 +230,13 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 			}
 		}
 	}
+
+	/* Schedule qdisc when flow re-enabled */
+	if (flow_change && q->enable_flow) {
+		if (!test_bit(__QDISC_STATE_DEACTIVATED,
+			      &sch->state))
+			__netif_schedule(qdisc_root(sch));
+	}
 	return 0;
 }
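
For context only (not part of the diff above): a minimal sketch of how the
enable_flow gate on the PRIO dequeue path is assumed to work. The field and
function names mirror mainline sch_prio.c, but the enable_flow check itself
is specific to this tree, so the actual prio_dequeue() here may differ.

/*
 * Sketch, not the actual implementation: an assumed, simplified
 * prio_dequeue() (sch_prio.c context) showing how q->enable_flow
 * could gate packet dequeue.
 */
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	/* While flow control has dequeue disabled, hand nothing to the
	 * driver; packets stay queued until enable_flow is set again.
	 */
	if (!q->enable_flow)
		return NULL;

	/* Scan bands in priority order, as the PRIO qdisc normally does. */
	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->dequeue(qdisc);

		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;
}

With a gate like this, dequeue simply returns NULL while enable_flow is
cleared, so nothing on the data path wakes the qdisc once the flag is set
again; queued packets would wait for the next enqueue or watchdog to kick
the queue. Calling __netif_schedule() from prio_tune() on the transition to
enabled is what removes that wait.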