tcp: remove Appropriate Byte Count support

TCP Appropriate Byte Count was added by me, but later disabled.
There is no point in maintaining it since it is a potential source
of bugs and Linux already implements other better window protection
heuristics.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Change-Id: Ifc93fa043256c275580a9f93611ad1d013889bc4
This commit is contained in:
Stephen Hemminger 2013-02-05 07:25:17 +00:00 committed by surblazer
parent a9d180c942
commit 39b326bec0
9 changed files with 1 addition and 67 deletions

View File

@@ -130,17 +130,6 @@ somaxconn - INTEGER
 	Defaults to 128. See also tcp_max_syn_backlog for additional tuning
 	for TCP sockets.
-tcp_abc - INTEGER
-	Controls Appropriate Byte Count (ABC) defined in RFC3465.
-	ABC is a way of increasing congestion window (cwnd) more slowly
-	in response to partial acknowledgments.
-	Possible values are:
-		0 increase cwnd once per acknowledgment (no ABC)
-		1 increase cwnd once per acknowledgment of full sized segment
-		2 allow increase cwnd by two if acknowledgment is
-		  of two segments to compensate for delayed acknowledgments.
-	Default: 0 (off)
 tcp_abort_on_overflow - BOOLEAN
 	If listening service is too slow to accept new connections,
 	reset them. Default state is FALSE. It means that if overflow

View File

@@ -398,7 +398,6 @@ struct tcp_sock {
 	u32	sacked_out;	/* SACK'd packets */
 	u32	fackets_out;	/* FACK'd packets */
 	u32	tso_deferred;
-	u32	bytes_acked;	/* Appropriate Byte Counting - RFC3465 */
 	/* from STCP, retrans queue hinting */
 	struct sk_buff* lost_skb_hint;

View File

@@ -245,7 +245,6 @@ extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_abc;
 extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;

View File

@@ -388,7 +388,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
 	{ CTL_INT,	NET_TCP_MODERATE_RCVBUF,	"tcp_moderate_rcvbuf" },
 	{ CTL_INT,	NET_TCP_TSO_WIN_DIVISOR,	"tcp_tso_win_divisor" },
 	{ CTL_STR,	NET_TCP_CONG_CONTROL,		"tcp_congestion_control" },
-	{ CTL_INT,	NET_TCP_ABC,			"tcp_abc" },
 	{ CTL_INT,	NET_TCP_MTU_PROBING,		"tcp_mtu_probing" },
 	{ CTL_INT,	NET_TCP_BASE_MSS,		"tcp_base_mss" },
 	{ CTL_INT,	NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS,	"tcp_workaround_signed_windows" },

View File

@@ -587,13 +587,6 @@ static struct ctl_table ipv4_table[] = {
 		.maxlen		= TCP_CA_NAME_MAX,
 		.proc_handler	= proc_tcp_congestion_control,
 	},
-	{
-		.procname	= "tcp_abc",
-		.data		= &sysctl_tcp_abc,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 	{
 		.procname	= "tcp_mtu_probing",
 		.data		= &sysctl_tcp_mtu_probing,

View File

@@ -2144,7 +2144,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->packets_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	tp->window_clamp = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);

View File

@@ -310,28 +310,11 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
 	int cnt; /* increase in packets */
-	/* RFC3465: ABC Slow start
-	 * Increase only after a full MSS of bytes is acked
-	 *
-	 * TCP sender SHOULD increase cwnd by the number of
-	 * previously unacknowledged bytes ACKed by each incoming
-	 * acknowledgment, provided the increase is not more than L
-	 */
-	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
-		return;
 	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
 		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
 	else
 		cnt = tp->snd_cwnd;			/* exponential increase */
-	/* RFC3465: ABC
-	 * We MAY increase by 2 if discovered delayed ack
-	 */
-	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
-		cnt <<= 1;
-	tp->bytes_acked = 0;
 	tp->snd_cwnd_cnt += cnt;
 	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
 		tp->snd_cwnd_cnt -= tp->snd_cwnd;
@@ -371,20 +354,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 	/* In "safe" area, increase. */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp);
-	/* In dangerous area, increase slowly. */
-	else if (sysctl_tcp_abc) {
-		/* RFC3465: Appropriate Byte Count
-		 * increase once for each full cwnd acked
-		 */
-		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
-			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-		}
-	} else {
+	/* In dangerous area, increase slowly. */
+	else
 		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
-	}
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

View File

@@ -101,7 +101,6 @@ int sysctl_tcp_nometrics_save __read_mostly;
 int sysctl_tcp_thin_dupack __read_mostly;
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
-int sysctl_tcp_abc __read_mostly;
 int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
 #define FLAG_DATA		0x01 /* Incoming frame contained data. */
@@ -850,7 +849,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
 	if (icsk->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
 		if (set_ssthresh)
@@ -2210,7 +2208,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 	tp->frto_counter = 0;
-	tp->bytes_acked = 0;
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
@@ -2259,7 +2256,6 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
-	tp->bytes_acked = 0;
 	tcp_clear_retrans_partial(tp);
 	if (tcp_is_reno(tp))
@@ -3175,7 +3171,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 			TCP_ECN_queue_cwr(tp);
 		}
-		tp->bytes_acked = 0;
 		tp->snd_cwnd_cnt = 0;
 		tp->prior_cwnd = tp->snd_cwnd;
 		tp->prr_delivered = 0;
@@ -3558,7 +3553,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 {
 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	TCP_ECN_queue_cwr(tp);
 	tcp_moderate_cwnd(tp);
 }
@@ -3778,15 +3772,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
-	if (sysctl_tcp_abc) {
-		if (icsk->icsk_ca_state < TCP_CA_CWR)
-			tp->bytes_acked += ack - prior_snd_una;
-		else if (icsk->icsk_ca_state == TCP_CA_Loss)
-			/* we assume just one segment left network */
-			tp->bytes_acked += min(ack - prior_snd_una,
-					       tp->mss_cache);
-	}
 	prior_fackets = tp->fackets_out;
 	prior_in_flight = tcp_packets_in_flight(tp);

View File

@@ -490,7 +490,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	 */
 	newtp->snd_cwnd = TCP_INIT_CWND;
 	newtp->snd_cwnd_cnt = 0;
-	newtp->bytes_acked = 0;
 	newtp->frto_counter = 0;
 	newtp->frto_highmark = 0;