author    David S. Miller <davem@davemloft.net>  2016-11-21 13:20:17 -0500
committer David S. Miller <davem@davemloft.net>  2016-11-21 13:20:17 -0500
commit    9e36ced6335be42e637a827b64f510f143dbb5a7
tree      870e5c09c37890d286872c9ae22382d6bf336b37  /net/ipv4/tcp_cong.c
parent    2fcb58ab30deb63e49f238bf95d587740fab59c4
parent    e97991832a4ea4a5f47d65f068a4c966a2eb5730
Merge branch 'tcp-cong-undo_cwnd-mandatory'
Florian Westphal says:

====================
tcp: make undo_cwnd mandatory for congestion modules

The highspeed, illinois, scalable, veno and yeah congestion control
algorithms don't provide an undo_cwnd function. This makes the stack
fall back to the reno undo, which doubles cwnd. However, the ssthresh
implementations of these algorithms do not halve the slow-start
threshold, so the doubled cwnd overshoots the pre-loss value. This
causes an issue similar to the one fixed for dctcp in ce6dd23329b1e
("dctcp: avoid bogus doubling of cwnd after loss").

In light of this it seems better to remove the fallback and make
undo_cwnd mandatory.

The first patch fixes the spots where the reno undo seems incorrect by
providing .undo_cwnd functions; the second patch removes the fallback.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
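For context, before this series the undo path in tcp_undo_cwnd_reduction()
(net/ipv4/tcp_input.c) fell back to reno behaviour whenever a module lacked
the hook. A rough sketch of that fallback, with the surrounding bookkeeping
elided (an approximation, not the verbatim removed code):

	/* old behaviour: without ->undo_cwnd, assume reno semantics and
	 * rebuild cwnd by doubling the slow-start threshold
	 */
	if (icsk->icsk_ca_ops->undo_cwnd)
		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
	else
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

Doubling ssthresh reproduces the pre-loss cwnd only when ssthresh was set to
cwnd/2, as reno's ssthresh does; the algorithms listed above back off by less
than half, so the fallback could restore a cwnd larger than the connection
ever had before the loss.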
Diffstat (limited to 'net/ipv4/tcp_cong.c')
-rw-r--r--  net/ipv4/tcp_cong.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1294af4e0127..38905ec5f508 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -68,8 +68,9 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
 {
 	int ret = 0;
 
-	/* all algorithms must implement ssthresh and cong_avoid ops */
-	if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
+	/* all algorithms must implement these */
+	if (!ca->ssthresh || !ca->undo_cwnd ||
+	    !(ca->cong_avoid || ca->cong_control)) {
 		pr_err("%s does not implement required ops\n", ca->name);
 		return -EINVAL;
 	}
@@ -441,10 +442,19 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
+u32 tcp_reno_undo_cwnd(struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+
+	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+}
+EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
+
 struct tcp_congestion_ops tcp_reno = {
 	.flags		= TCP_CONG_NON_RESTRICTED,
 	.name		= "reno",
 	.owner		= THIS_MODULE,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 };
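
With the fallback gone, every congestion module must supply its own undo.
For the algorithms fixed by the first patch, an undo is typically built from
a cwnd snapshot taken at loss time. A minimal module-side sketch of that
pattern; the name foo, the helpers foo_ssthresh/foo_cwnd_undo and the
one-quarter backoff are illustrative, not taken from the series:

	#include <net/tcp.h>

	struct foo {
		u32 loss_cwnd;	/* cwnd snapshot taken when loss was detected */
	};

	static u32 foo_ssthresh(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);
		struct foo *ca = inet_csk_ca(sk);

		ca->loss_cwnd = tp->snd_cwnd;	/* remember pre-loss cwnd */
		/* illustrative backoff of one quarter; real algorithms differ */
		return max(tp->snd_cwnd - (tp->snd_cwnd >> 2), 2U);
	}

	static u32 foo_cwnd_undo(struct sock *sk)
	{
		const struct foo *ca = inet_csk_ca(sk);

		/* restore the remembered cwnd instead of doubling ssthresh */
		return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
	}

Registering such a module via tcp_register_congestion_control() without an
.undo_cwnd member now fails with -EINVAL, which is the point of the second
patch: an incorrect undo is no longer silently substituted.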