Revert "net: tcp: Scale the TCP backlog queue to absorb packet bursts"

This reverts commit cf5d51af9e.
The original change is not intended for mainlining. The change may have
detrimental effects on low-memory devices, potentially leading to OOM
conditions if a misbehaving application does not read data from the
socket fast enough.

CRs-Fixed: 793795
Change-Id: If7746936b676888d2f53dca0723294a24e88295a
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
This commit is contained in:
Harout Hedeshian 2015-02-10 09:44:18 -08:00 committed by Harout Hedeshian
parent e2d8ea30be
commit 0f719be598
2 changed files with 2 additions and 5 deletions

View file

@@ -68,8 +68,6 @@
#include <net/dst.h>
#include <net/checksum.h>
#define TCP_BACKLOG_SCALE 4
struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
@@ -781,7 +779,7 @@ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
unsigned int limit)
{
if (sk_rcvqueues_full(sk, skb, limit * TCP_BACKLOG_SCALE))
if (sk_rcvqueues_full(sk, skb, limit))
return -ENOBUFS;
__sk_add_backlog(sk, skb);

View file

@@ -4094,8 +4094,7 @@ static int tcp_prune_queue(struct sock *sk);
static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
unsigned int size)
{
if (atomic_read(&sk->sk_rmem_alloc)
> ((sk->sk_rcvbuf + sk->sk_sndbuf) * 4) ||
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, skb, size)) {
if (tcp_prune_queue(sk) < 0)