net: tcp: Scale the TCP backlog queue to absorb packet bursts

A large momentary influx of packets flooding the TCP layer may cause
packets to get dropped at the socket's backlog queue. Bump this limit up
to prevent these drops. Note that this change may cause the socket memory
accounting to allow the total backlog queue length to exceed the user
space configured values, sometimes by a substantial amount, which can
lead to out-of-order packets being dropped instead of queued. To avoid
these ofo drops, the condition for dropping an out-of-order packet is
modified to allow out-of-order queuing to continue as long as it falls
within the now increased backlog queue limit.

Change-Id: I447ffc8560cb149fe84193c72bf693862f7ec740
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
Author: Harout Hedeshian <harouth@codeaurora.org>
Date:   2015-02-02 13:30:42 -07:00
parent  0b9ec111f5
commit  cf5d51af9e
2 changed files with 5 additions and 2 deletions
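
To put a rough number on "a substantial amount", here is a minimal user-space
sketch, assuming hypothetical buffer sizes and assuming the receive path passes
sk->sk_rcvbuf + sk->sk_sndbuf as the backlog limit (as tcp_v4_rcv did in
kernels of this era). It only compares the pre- and post-patch ceilings; it is
not kernel code.

/* Standalone sketch: compares the effective backlog ceiling before and
 * after the patch for hypothetical, example buffer sizes. */
#include <stdio.h>

#define TCP_BACKLOG_SCALE 4

int main(void)
{
        unsigned int sk_rcvbuf = 4u * 1024 * 1024;  /* example receive buffer */
        unsigned int sk_sndbuf = 4u * 1024 * 1024;  /* example send buffer    */
        unsigned int limit = sk_rcvbuf + sk_sndbuf; /* limit supplied by the caller */

        printf("old backlog ceiling: %u bytes\n", limit);
        printf("new backlog ceiling: %u bytes\n", limit * TCP_BACKLOG_SCALE);
        return 0;
}

With 4 MB buffers on each side, a queue that user space sized at 8 MB can now
grow to 32 MB before packets are dropped.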

include/net/sock.h

@@ -68,6 +68,8 @@
 #include <net/dst.h>
 #include <net/checksum.h>
 
+#define TCP_BACKLOG_SCALE 4
+
 struct cgroup;
 struct cgroup_subsys;
 #ifdef CONFIG_NET
@@ -779,7 +781,7 @@ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
                                               unsigned int limit)
 {
-        if (sk_rcvqueues_full(sk, skb, limit))
+        if (sk_rcvqueues_full(sk, skb, limit * TCP_BACKLOG_SCALE))
                 return -ENOBUFS;
 
         __sk_add_backlog(sk, skb);

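The scaled check above is what absorbs the burst. A minimal user-space sketch
of that admission decision follows, with a mock structure standing in for the
real socket fields and assuming sk_rcvqueues_full() compares backlog plus
receive-queue bytes against the limit:

/* Simplified model of the backlog admission check; the struct and helpers
 * are stand-ins, not the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

#define TCP_BACKLOG_SCALE 4

struct mock_sock {
        unsigned int backlog_len; /* bytes already sitting on the backlog */
        unsigned int rmem_alloc;  /* bytes charged to the receive queue   */
};

/* Models sk_rcvqueues_full(): total queued bytes versus a limit. */
static bool rcvqueues_full(const struct mock_sock *sk, unsigned int limit)
{
        return sk->backlog_len + sk->rmem_alloc > limit;
}

/* Models sk_add_backlog(): 0 on enqueue, -1 standing in for -ENOBUFS. */
static int add_backlog(struct mock_sock *sk, unsigned int truesize,
                       unsigned int limit)
{
        /* The patch scales the caller-supplied limit before the check. */
        if (rcvqueues_full(sk, limit * TCP_BACKLOG_SCALE))
                return -1;
        sk->backlog_len += truesize;
        return 0;
}

int main(void)
{
        struct mock_sock sk = { .backlog_len = 5u << 20, .rmem_alloc = 4u << 20 };
        unsigned int limit = 8u << 20; /* e.g. rcvbuf + sndbuf from the caller */

        /* 9 MB already queued against an 8 MB limit: dropped before the
         * patch, still admitted with the 4x scaled limit. */
        printf("burst packet %s\n",
               add_backlog(&sk, 1500, limit) ? "dropped" : "queued");
        return 0;
}
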
net/ipv4/tcp_input.c

@@ -4094,7 +4094,8 @@ static int tcp_prune_queue(struct sock *sk);
 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
                                  unsigned int size)
 {
-        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+        if (atomic_read(&sk->sk_rmem_alloc)
+            > ((sk->sk_rcvbuf + sk->sk_sndbuf) * 4) ||
             !sk_rmem_schedule(sk, skb, size)) {
                 if (tcp_prune_queue(sk) < 0)
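
On the ofo side, tripping this condition is what pushes TCP into
tcp_prune_queue(), where out-of-order data can get dropped. A small sketch of
how the old and new pressure tests differ, with plain integers standing in for
the socket fields and hypothetical sizes:

/* Old vs. new memory-pressure test from tcp_try_rmem_schedule(), modeled
 * in user space; all sizes below are hypothetical examples. */
#include <stdbool.h>
#include <stdio.h>

static bool over_limit_old(unsigned int rmem_alloc, unsigned int rcvbuf)
{
        return rmem_alloc > rcvbuf;
}

static bool over_limit_new(unsigned int rmem_alloc, unsigned int rcvbuf,
                           unsigned int sndbuf)
{
        /* Tracks the enlarged backlog ceiling: (rcvbuf + sndbuf) * 4. */
        return rmem_alloc > (rcvbuf + sndbuf) * 4;
}

int main(void)
{
        unsigned int rcvbuf = 4u << 20, sndbuf = 4u << 20;
        unsigned int rmem_alloc = 9u << 20; /* receive memory after a burst */

        /* The old test trips at 4 MB and prunes; the new one allows up to
         * 32 MB, so out-of-order segments keep being queued during the burst. */
        printf("old test prunes: %s\n",
               over_limit_old(rmem_alloc, rcvbuf) ? "yes" : "no");
        printf("new test prunes: %s\n",
               over_limit_new(rmem_alloc, rcvbuf, sndbuf) ? "yes" : "no");
        return 0;
}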