path: root/net/ipv4/tcp_illinois.c
author	David S. Miller <davem@davemloft.net>	2016-11-21 13:20:17 -0500
committer	David S. Miller <davem@davemloft.net>	2016-11-21 13:20:17 -0500
commit	9e36ced6335be42e637a827b64f510f143dbb5a7 (patch)
tree	870e5c09c37890d286872c9ae22382d6bf336b37 /net/ipv4/tcp_illinois.c
parent	2fcb58ab30deb63e49f238bf95d587740fab59c4 (diff)
parent	e97991832a4ea4a5f47d65f068a4c966a2eb5730 (diff)
Merge branch 'tcp-cong-undo_cwnd-mandatory'
Florian Westphal says:

====================
tcp: make undo_cwnd mandatory for congestion modules

The highspeed, illinois, scalable, veno and yeah congestion control
algorithms don't provide a 'cwnd_undo' function. This makes the stack
default to a 'reno undo', which doubles cwnd. However, the ssthresh
implementations of these algorithms do not halve the slow-start
threshold. This causes an issue similar to the one fixed for dctcp in
ce6dd23329b1e ("dctcp: avoid bogus doubling of cwnd after loss").

In light of this it seems better to remove the fallback and make
undo_cwnd mandatory.

The first patch fixes those spots where the reno undo seems incorrect
by providing .cwnd_undo functions; the second patch removes the
fallback.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
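For context, the fallback the second patch removes lives in the stack's
undo path: a module without .undo_cwnd falls back to a reno-style undo
that doubles the slow-start threshold. The following is a minimal
userspace sketch of why that overshoots for illinois, assuming
tcp_illinois.c's fixed-point scheme (BETA_SHIFT of 6, minimum beta of
8/64 = 0.125); the helper name is an illustrative stand-in, not kernel
code.

#include <stdio.h>

#define BETA_SHIFT	6	/* illinois fixed point: beta is in 1/64ths */

/* mirrors tcp_illinois_ssthresh(): max(cwnd - (cwnd * beta >> BETA_SHIFT), 2U) */
static unsigned int illinois_ssthresh(unsigned int cwnd, unsigned int beta)
{
	unsigned int dec = (cwnd * beta) >> BETA_SHIFT;

	return (cwnd - dec) > 2 ? cwnd - dec : 2;
}

int main(void)
{
	unsigned int cwnd = 100;	/* pre-loss congestion window */
	unsigned int beta = 8;		/* illinois minimum beta: 8/64 = 0.125 */
	unsigned int ssthresh = illinois_ssthresh(cwnd, beta);

	/* old fallback: a reno-style undo restores ssthresh << 1 */
	printf("pre-loss cwnd=%u ssthresh=%u reno-undo=%u\n",
	       cwnd, ssthresh, ssthresh << 1);	/* 100, 88, 176 */
	return 0;
}

With reno's halving (ssthresh = cwnd / 2) the doubling lands exactly on
the pre-loss window; with illinois keeping 7/8 of cwnd at minimum beta,
it overshoots by 76%.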
Diffstat (limited to 'net/ipv4/tcp_illinois.c')
-rw-r--r--	net/ipv4/tcp_illinois.c	10
1 file changed, 10 insertions, 0 deletions
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index c8e6d86be114..60352ff4f5a8 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -48,6 +48,7 @@ struct illinois {
 	u32	end_seq;	/* right edge of current RTT */
 	u32	alpha;		/* Additive increase */
 	u32	beta;		/* Muliplicative decrease */
+	u32	loss_cwnd;	/* cwnd on loss */
 	u16	acked;		/* # packets acked by current ACK */
 	u8	rtt_above;	/* average rtt has gone above threshold */
 	u8	rtt_low;	/* # of rtts measurements below threshold */
@@ -296,10 +297,18 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
 
+	ca->loss_cwnd = tp->snd_cwnd;
 	/* Multiplicative decrease */
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
 }
 
+static u32 tcp_illinois_cwnd_undo(struct sock *sk)
+{
+	const struct illinois *ca = inet_csk_ca(sk);
+
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 /* Extract info for Tcp socket info provided via netlink. */
 static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
 				union tcp_cc_info *info)
@@ -327,6 +336,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
 static struct tcp_congestion_ops tcp_illinois __read_mostly = {
 	.init		= tcp_illinois_init,
 	.ssthresh	= tcp_illinois_ssthresh,
+	.undo_cwnd	= tcp_illinois_cwnd_undo,
 	.cong_avoid	= tcp_illinois_cong_avoid,
 	.set_state	= tcp_illinois_state,
 	.get_info	= tcp_illinois_info,
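With the patch applied, ssthresh records the pre-loss window in
ca->loss_cwnd and tcp_illinois_cwnd_undo() hands it back, avoiding the
doubling entirely. A standalone sketch of those semantics follows; the
struct and helper names are illustrative stand-ins for the kernel code
in the diff above.

#include <stdio.h>

struct illinois_sketch {
	unsigned int loss_cwnd;	/* cwnd on loss, saved by ssthresh */
};

/* mirrors tcp_illinois_cwnd_undo(): undo never shrinks the window */
static unsigned int cwnd_undo(unsigned int snd_cwnd,
			      const struct illinois_sketch *ca)
{
	return snd_cwnd > ca->loss_cwnd ? snd_cwnd : ca->loss_cwnd;
}

int main(void)
{
	struct illinois_sketch ca = { .loss_cwnd = 100 };
	unsigned int cwnd = 88;	/* window after the multiplicative decrease */

	printf("undo: %u -> %u\n", cwnd, cwnd_undo(cwnd, &ca));	/* 88 -> 100 */
	return 0;
}

Taking the max() against the current window means a late undo never
shrinks a cwnd that has already grown past the saved value; the other
modules fixed in this series (highspeed, scalable, veno, yeah) follow
the same pattern.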