mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
kernel: Revert "tcp: do not lock listener to process SYN packets"
This commit belongs to the patch set (https://lwn.net/Articles/659199/) that attempts to remove the use of locks on the socket table by relocating the SYN table to a separate hash table and adding a spin lock to protect the SYN request queue. Adding only this commit introduces a race condition for LineageOS kernels for TCP listens, since the TCP SYN data structures can be corrupted. A TCP curl bomb on a TCP listen port will corrupt the SYN accept backlog: for i in $(seq 1 400); do curl -x localhost:443 https://myhost.com -L --connect-timeout 30 -o /dev/null -sS & done Run `ss -nltp` and usually the Recv-Q column does not drain to 0. This reverts commit 7d9f104f9cabe1d72a50c4816a48f64fc1da7a64. This really needs to be reverted across all LineageOS forks: https://gitlab.com/LineageOS/issues/android/-/issues/3916#note_669493796 Change-Id: Ia7969aeedae411677b307a8e094f9a4cc02b801d
This commit is contained in:
parent
0b02bd0ec5
commit
7c56badc9a
2 changed files with 2 additions and 14 deletions
|
@ -1589,7 +1589,7 @@ static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
|
||||||
|
|
||||||
|
|
||||||
/* The socket must have it's spinlock held when we get
|
/* The socket must have it's spinlock held when we get
|
||||||
* here, unless it is a TCP_LISTEN socket.
|
* here.
|
||||||
*
|
*
|
||||||
* We have a potential double-lock case here, so even when
|
* We have a potential double-lock case here, so even when
|
||||||
* doing backlog processing we use the BH locking scheme.
|
* doing backlog processing we use the BH locking scheme.
|
||||||
|
@ -1728,11 +1728,6 @@ process:
|
||||||
|
|
||||||
skb->dev = NULL;
|
skb->dev = NULL;
|
||||||
|
|
||||||
if (sk->sk_state == TCP_LISTEN) {
|
|
||||||
ret = tcp_v4_do_rcv(sk, skb);
|
|
||||||
goto put_and_return;
|
|
||||||
}
|
|
||||||
|
|
||||||
bh_lock_sock_nested(sk);
|
bh_lock_sock_nested(sk);
|
||||||
tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
|
tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
@ -1756,7 +1751,6 @@ process:
|
||||||
}
|
}
|
||||||
bh_unlock_sock(sk);
|
bh_unlock_sock(sk);
|
||||||
|
|
||||||
put_and_return:
|
|
||||||
sock_put(sk);
|
sock_put(sk);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -1464,7 +1464,7 @@ static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* The socket must have it's spinlock held when we get
|
/* The socket must have it's spinlock held when we get
|
||||||
* here, unless it is a TCP_LISTEN socket.
|
* here.
|
||||||
*
|
*
|
||||||
* We have a potential double-lock case here, so even when
|
* We have a potential double-lock case here, so even when
|
||||||
* doing backlog processing we use the BH locking scheme.
|
* doing backlog processing we use the BH locking scheme.
|
||||||
|
@ -1658,11 +1658,6 @@ process:
|
||||||
|
|
||||||
skb->dev = NULL;
|
skb->dev = NULL;
|
||||||
|
|
||||||
if (sk->sk_state == TCP_LISTEN) {
|
|
||||||
ret = tcp_v6_do_rcv(sk, skb);
|
|
||||||
goto put_and_return;
|
|
||||||
}
|
|
||||||
|
|
||||||
bh_lock_sock_nested(sk);
|
bh_lock_sock_nested(sk);
|
||||||
tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
|
tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
@ -1686,7 +1681,6 @@ process:
|
||||||
}
|
}
|
||||||
bh_unlock_sock(sk);
|
bh_unlock_sock(sk);
|
||||||
|
|
||||||
put_and_return:
|
|
||||||
sock_put(sk);
|
sock_put(sk);
|
||||||
return ret ? -1 : 0;
|
return ret ? -1 : 0;
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue