net: add a limit parameter to sk_add_backlog()

sk_add_backlog() and sk_rcvqueues_full() hard-coded sk_rcvbuf as the
memory limit. We need to make this limit a parameter for TCP use.

No functional change is expected in this patch; all callers still use the
old sk_rcvbuf limit.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Eric Dumazet, 2012-04-22 23:34:26 +00:00, committed by David S. Miller
commit f545a38f74, parent b98985073b
10 changed files with 21 additions and 19 deletions
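For illustration only, not part of the commit: the standalone C sketch below models the new semantics in plain userspace code, so it can be compiled and run outside the kernel. The struct and helpers (mock_sock, rcvqueues_full(), add_backlog()) and the numeric values are made up for this example; the real inlines are the sk_rcvqueues_full()/sk_add_backlog() changes in include/net/sock.h below. The point of the new parameter is that a later TCP change can pass a limit other than sk->sk_rcvbuf, while every call site touched here keeps the old behaviour by passing sk->sk_rcvbuf explicitly.

/* Standalone model, not kernel code: simplified stand-ins for the socket
 * fields the real helpers read (sk_backlog.len, sk_rmem_alloc, sk_rcvbuf).
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_sock {
	unsigned int backlog_len;	/* models sk->sk_backlog.len       */
	unsigned int rmem_alloc;	/* models atomic sk->sk_rmem_alloc */
	unsigned int rcvbuf;		/* models sk->sk_rcvbuf            */
};

/* Mirrors the patched sk_rcvqueues_full(): compare queued memory against a
 * caller-chosen limit instead of the hard-coded sk->sk_rcvbuf.
 */
static bool rcvqueues_full(const struct mock_sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

	return qsize > limit;
}

/* Mirrors the patched sk_add_backlog(): refuse with -ENOBUFS when the queues
 * already exceed 'limit', otherwise account the packet on the backlog.
 */
static int add_backlog(struct mock_sock *sk, unsigned int truesize,
		       unsigned int limit)
{
	if (rcvqueues_full(sk, limit))
		return -ENOBUFS;

	sk->backlog_len += truesize;
	return 0;
}

int main(void)
{
	struct mock_sock sk = {
		.backlog_len = 90000,
		.rmem_alloc  = 20000,
		.rcvbuf      = 100000,
	};

	/* Callers converted in this patch keep the old behaviour by passing
	 * the receive buffer size as the limit: this packet is refused.
	 */
	printf("limit = rcvbuf:     %d\n", add_backlog(&sk, 1500, sk.rcvbuf));

	/* A TCP-style caller could pass a larger limit (value is purely an
	 * example) and accept the same packet.
	 */
	printf("limit = 2 * rcvbuf: %d\n", add_backlog(&sk, 1500, 2 * sk.rcvbuf));
	return 0;
}

Run as an ordinary C program, the first call reports -ENOBUFS because the queued 110000 bytes already exceed the 100000-byte receive buffer, while the second call succeeds under the larger, purely illustrative limit; that difference is exactly the knob the new parameter exposes.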

--- a/include/net/sock.h
+++ b/include/net/sock.h

@@ -709,17 +709,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);

--- a/net/core/sock.c
+++ b/net/core/sock.c

@@ -389,7 +389,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -406,7 +406,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;

--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c

@@ -1752,7 +1752,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;

--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c

@@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;
 
 	rc = 0;
@@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}

--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c

@@ -1654,7 +1654,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;

--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c

@@ -611,14 +611,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1)) {
+			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -790,14 +790,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	/* deliver */
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		sock_put(sk);
 		goto discard;
 	}
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);

--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c

@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog(sk, skb))
+		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 			goto drop_unlock;
 	}
 out:

--- a/net/sctp/input.c
+++ b/net/sctp/input.c

@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog(sk, skb))
+			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog(sk, skb);
+	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear

--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c

@@ -1330,7 +1330,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf))
+		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;

--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c

@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 	if (!sock_owned_by_user(sk)) {
 		queued = x25_process_rx_frame(sk, skb);
 	} else {
-		queued = !sk_add_backlog(sk, skb);
+		queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	}
 	bh_unlock_sock(sk);
 	sock_put(sk);