net: rmnet_data: Further optimize UL aggregation accumulation

Do not aggregate frames if they are spaced out by more than 10 ms. Since
the schedule_delayed_work() API only takes time in jiffies, ping packets
were getting substantially delayed by the aggregation timer. Instead,
just send them immediately. The threshold is tunable at runtime via the
agg_bypass_time module parameter.

CRs-Fixed: 772705
Change-Id: I6ac337c8d61b1290f939b86081070c14c2c757b1
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
Author: Harout Hedeshian, 2014-12-17 10:22:36 -07:00
parent 89098ef425
commit af491b29a8
3 changed files with 35 additions and 4 deletions
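
Context for the diff that follows: the whole change boils down to one timestamp
comparison on the transmit path. Below is a minimal sketch of that predicate,
assuming the pre-4.x timespec/getnstimeofday API this driver uses; the helper
name is ours, not the patch's.

#include <linux/types.h>
#include <linux/time.h>

/*
 * Sketch only. schedule_delayed_work() takes its delay in jiffies, so at
 * HZ=100 even a 1-jiffy aggregation flush can hold a lone ping for ~10 ms.
 * The patch therefore sends a frame immediately whenever it arrives more
 * than agg_bypass_time (default 10000000 ns = 10 ms) after the previous one.
 */
static bool rmnet_agg_should_bypass(struct timespec last, struct timespec now,
				    long bypass_ns)
{
	struct timespec diff = timespec_sub(now, last);

	return diff.tv_sec > 0 || diff.tv_nsec > bypass_ns;
}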

@@ -59,6 +59,7 @@ struct rmnet_logical_ep_conf_s {
  *               aggregation
  * @tail_spacing: Guaranteed padding (bytes) when de-aggregating ingress frames
  * @agg_time: Wall clock time when aggregated frame was created
+ * @agg_last: Last time the aggregation routine was invoked
  */
 struct rmnet_phys_ep_conf_s {
 	struct net_device *dev;
@@ -70,12 +71,17 @@ struct rmnet_phys_ep_conf_s {
 	/* MAP specific */
 	uint16_t egress_agg_size;
 	uint16_t egress_agg_count;
-	uint8_t tail_spacing;
+	/* MAP aggregation state machine
+	 * - This is not strictly configuration and is updated at runtime
+	 *   Make sure all of these are protected by the agg_lock
+	 */
 	spinlock_t agg_lock;
 	struct sk_buff *agg_skb;
 	uint8_t agg_state;
 	uint8_t agg_count;
+	uint8_t tail_spacing;
 	struct timespec agg_time;
+	struct timespec agg_last;
 };

 int rmnet_config_init(void);
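
The comment added above turns an implicit rule into an explicit one: every
field from agg_lock down is runtime state, not configuration, and must only be
touched with agg_lock held. A hypothetical compliant accessor, for
illustration only (this helper is not part of the driver):

static uint8_t rmnet_agg_pending_count(struct rmnet_phys_ep_conf_s *config)
{
	unsigned long flags;
	uint8_t count;

	/* agg_count is aggregation state, so snapshot it under agg_lock */
	spin_lock_irqsave(&config->agg_lock, flags);
	count = config->agg_count;
	spin_unlock_irqrestore(&config->agg_lock, flags);

	return count;
}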

@@ -48,6 +48,7 @@ enum rmnet_queue_xmit_e {
 	RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER,
 	RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT,
 	RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL,
+	RMNET_STATS_QUEUE_XMIT_AGG_SKIP,
 	RMNET_STATS_QUEUE_XMIT_MAX
 };

@@ -45,6 +45,10 @@ long agg_time_limit __read_mostly = 1000000L;
 module_param(agg_time_limit, long, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");
+long agg_bypass_time __read_mostly = 10000000L;
+module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(agg_bypass_time, "Skip aggregation when packets are spaced apart more than this");

 struct agg_work {
 	struct delayed_work work;
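
Both knobs are longs measured in nanoseconds and are exported with
S_IRUGO | S_IWUSR, so root can retune them on a live device. Assuming the
module is named rmnet_data (the excerpt does not show the Makefile), writing
20000000 to /sys/module/rmnet_data/parameters/agg_bypass_time would, for
example, raise the bypass threshold to 20 ms without a rebuild.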
@@ -225,7 +229,7 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 	struct agg_work *work;
 	unsigned long flags;
 	struct sk_buff *agg_skb;
-	struct timespec t, diff;
+	struct timespec diff, last;
 	int size, rc, agg_count = 0;
@@ -240,7 +244,28 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 new_packet:
 	spin_lock_irqsave(&config->agg_lock, flags);
+	memcpy(&last, &(config->agg_last), sizeof(struct timespec));
+	getnstimeofday(&(config->agg_last));
 	if (!config->agg_skb) {
+		/* Check to see if we should agg first. If the traffic is very
+		 * sparse, don't aggregate. We will need to tune this later
+		 */
+		diff = timespec_sub(config->agg_last, last);
+		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
+			     diff.tv_nsec);
+			rmnet_stats_agg_pkts(1);
+			trace_rmnet_map_aggregate(skb, 0);
+			rc = dev_queue_xmit(skb);
+			rmnet_stats_queue_xmit(rc,
+					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
+			return;
+		}
 		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
 		if (!config->agg_skb) {
 			config->agg_skb = 0;
@@ -260,8 +285,7 @@ new_packet:
 		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
 		goto schedule;
 	}
-	getnstimeofday(&t);
-	diff = timespec_sub(t, config->agg_time);
+	diff = timespec_sub(config->agg_last, config->agg_time);
 	if (skb->len > (config->egress_agg_size - config->agg_skb->len)
 	    || (config->agg_count >= config->egress_agg_count)