From c1e64e298b8cad309091b95d8436a0255c84f54a Mon Sep 17 00:00:00 2001
From: Lorenzo Colitti
Date: Wed, 16 Dec 2015 12:30:05 +0900
Subject: net: diag: Support destroying TCP sockets.

This implements SOCK_DESTROY for TCP sockets. It causes all blocking
calls on the socket to fail fast with ECONNABORTED and causes a
protocol close of the socket. It informs the other end of the
connection by sending a RST, i.e., initiating a TCP ABORT as per
RFC 793. ECONNABORTED was chosen for consistency with FreeBSD.

Signed-off-by: Lorenzo Colitti
Acked-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/net/tcp.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/net/tcp.h')

diff --git a/include/net/tcp.h b/include/net/tcp.h
index f80e74c5ad18..3077735b348d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1170,6 +1170,8 @@ void tcp_set_state(struct sock *sk, int state);

 void tcp_done(struct sock *sk);

+int tcp_abort(struct sock *sk, int err);
+
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
--
cgit v1.2.3

From b1f0a0e99c58fbd7ea053ca36ba623718272b618 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 21 Dec 2015 21:29:24 +0100
Subject: net: add inet_sk_transparent() helper

Avoids cluttering tcp_v4_send_reset when a followup patch extends it
to deal with timewait sockets.

Suggested-by: Eric Dumazet
Signed-off-by: Florian Westphal
Acked-by: Eric Dumazet
Acked-by: Hannes Frederic Sowa
Signed-off-by: David S. Miller
---
 include/net/request_sock.h |  2 +-
 include/net/tcp.h          | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

(limited to 'include/net/tcp.h')

diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index a0dde04eb178..f49759decb28 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -68,7 +68,7 @@ struct request_sock {
 	u32 peer_secid;
 };

-static inline struct request_sock *inet_reqsk(struct sock *sk)
+static inline struct request_sock *inet_reqsk(const struct sock *sk)
 {
 	return (struct request_sock *)sk;
 }

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3077735b348d..f33fecf4e282 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1620,6 +1620,18 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
 	tcp_sk(sk)->highest_sack = new;
 }

+/* This helper checks if socket has IP_TRANSPARENT set */
+static inline bool inet_sk_transparent(const struct sock *sk)
+{
+	switch (sk->sk_state) {
+	case TCP_TIME_WAIT:
+		return inet_twsk(sk)->tw_transparent;
+	case TCP_NEW_SYN_RECV:
+		return inet_rsk(inet_reqsk(sk))->no_srccheck;
+	}
+	return inet_sk(sk)->transparent;
+}
+
 /* Determines whether this is a thin stream (which may suffer from
  * increased latency). Used to trigger latency-reducing mechanisms.
  */
--
cgit v1.2.3
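For context on the SOCK_DESTROY patch above: the tcp_abort() it declares is reached from user space through a sock_diag netlink request, the mechanism ss -K later builds on. The following is only a hedged sketch, assuming a kernel with CONFIG_INET_DIAG_DESTROY and uapi headers that define SOCK_DESTROY; the four-tuple values are purely illustrative.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

static int destroy_tcp_socket(uint16_t sport, uint16_t dport,
			      const char *saddr, const char *daddr)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg;
	int ret, fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DESTROY;	/* from <linux/sock_diag.h> */
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	msg.req.sdiag_family = AF_INET;
	msg.req.sdiag_protocol = IPPROTO_TCP;
	msg.req.idiag_states = ~0U;		/* match any TCP state */
	msg.req.id.idiag_sport = htons(sport);
	msg.req.id.idiag_dport = htons(dport);
	inet_pton(AF_INET, saddr, msg.req.id.idiag_src);
	inet_pton(AF_INET, daddr, msg.req.id.idiag_dst);
	msg.req.id.idiag_cookie[0] = INET_DIAG_NOCOOKIE;
	msg.req.id.idiag_cookie[1] = INET_DIAG_NOCOOKIE;

	ret = sendto(fd, &msg, sizeof(msg), 0,
		     (struct sockaddr *)&nladdr, sizeof(nladdr));
	close(fd);	/* the NLMSG_ERROR ack is left unread here */
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	/* Hypothetical four-tuple; replace with a live connection's. */
	return destroy_tcp_socket(8080, 54321, "192.0.2.1", "192.0.2.2") ? 1 : 0;
}

On success the matched socket's blocking calls fail with ECONNABORTED and the peer receives a RST, exactly the behavior the commit message describes.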
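The inet_sk_transparent() helper just added answers "was IP_TRANSPARENT set" uniformly for full, timewait, and request sockets. The user-space side it observes looks roughly like this minimal TPROXY-style listener; it is illustrative only (CAP_NET_ADMIN is required, and the address and port are made up):

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_TRANSPARENT
#define IP_TRANSPARENT 19	/* value from <linux/in.h>, if libc lacks it */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8888),
		/* a non-local address this host does not own */
		.sin_addr.s_addr = inet_addr("203.0.113.7"),
	};

	if (setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one)) < 0) {
		perror("IP_TRANSPARENT");	/* needs CAP_NET_ADMIN */
		return 1;
	}
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	return listen(fd, 16) ? 1 : 0;
}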
From 13b287e8d1cad951634389f85b8c9b816bd3bb1e Mon Sep 17 00:00:00 2001
From: Nikolay Borisov
Date: Thu, 7 Jan 2016 16:38:43 +0200
Subject: ipv4: Namespaceify tcp_keepalive_time sysctl knob

Different net namespaces might have different requirements as to the
keepalive time of tcp sockets. This might be required in cases where
different firewall rules are in place which require tcp socket
timeouts to be increased/decreased independently of the host.

Signed-off-by: Nikolay Borisov
Signed-off-by: David S. Miller
---
 include/net/netns/ipv4.h   |  2 ++
 include/net/tcp.h          |  5 +++--
 net/ipv4/sysctl_net_ipv4.c | 14 +++++++-------
 net/ipv4/tcp_ipv4.c        |  2 ++
 net/ipv4/tcp_timer.c       |  1 -
 5 files changed, 14 insertions(+), 10 deletions(-)

(limited to 'include/net/tcp.h')

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d75be32650ba..9e9bbebaebd1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -94,6 +94,8 @@ struct netns_ipv4 {
 	int sysctl_tcp_probe_threshold;
 	u32 sysctl_tcp_probe_interval;

+	int sysctl_tcp_keepalive_time;
+
 	struct ping_group_range ping_group_range;

 	atomic_t dev_addr_genid;

diff --git a/include/net/tcp.h b/include/net/tcp.h
index f33fecf4e282..cb4d4cf25744 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -240,7 +240,6 @@ extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
 extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_keepalive_time;
 extern int sysctl_tcp_keepalive_probes;
 extern int sysctl_tcp_keepalive_intvl;
 extern int sysctl_tcp_syn_retries;
@@ -1230,7 +1229,9 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp)

 static inline int keepalive_time_when(const struct tcp_sock *tp)
 {
-	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
+	struct net *net = sock_net((struct sock *)tp);
+
+	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
 }

 static inline int keepalive_probes(const struct tcp_sock *tp)

diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 41ff1f87dfd7..1886cc842871 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -336,13 +336,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
-	{
-		.procname	= "tcp_keepalive_time",
-		.data		= &sysctl_tcp_keepalive_time,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
 	{
 		.procname	= "tcp_keepalive_probes",
 		.data		= &sysctl_tcp_keepalive_probes,
@@ -961,6 +954,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "tcp_keepalive_time",
+		.data		= &init_net.ipv4.sysctl_tcp_keepalive_time,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
 	{ }
 };

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fc4f72686705..6e14ff9a8580 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2385,6 +2385,8 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

+	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
+
 	return 0;
 fail:
 	tcp_sk_exit(net);

diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 193ba1fa8a9a..166f27b43cc0 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -24,7 +24,6 @@

 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
-int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
 int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
 int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
--
cgit v1.2.3
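The new per-namespace behavior is easy to observe. A hedged sketch, assuming root privileges: unshare(CLONE_NEWNET) enters a fresh netns whose knob starts at TCP_KEEPALIVE_TIME and can then diverge from the host's value.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#define KA_TIME "/proc/sys/net/ipv4/tcp_keepalive_time"

static void show(const char *tag)
{
	char buf[32] = "";
	int fd = open(KA_TIME, O_RDONLY);

	if (fd >= 0) {
		read(fd, buf, sizeof(buf) - 1);
		close(fd);
	}
	printf("%s: %s", tag, buf);	/* the value arrives with its newline */
}

int main(void)
{
	int fd;

	show("host netns");

	if (unshare(CLONE_NEWNET)) {	/* fresh netns, default knobs */
		perror("unshare");
		return 1;
	}
	show("new netns, default");

	fd = open(KA_TIME, O_WRONLY);
	if (fd >= 0) {
		write(fd, "600\n", 4);	/* changes this namespace only */
		close(fd);
	}
	show("new netns, after write");
	return 0;
}

Note that proc_dointvec_jiffies stores the value internally in jiffies; the proc file still reads and writes seconds.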
From 9bd6861bd4326e3afd3f14a9ec8a723771fb20bb Mon Sep 17 00:00:00 2001
From: Nikolay Borisov
Date: Thu, 7 Jan 2016 16:38:44 +0200
Subject: ipv4: Namespaceify tcp_keepalive_probes sysctl knob

This is required to have full tcp keepalive mechanism namespace
support.

Signed-off-by: Nikolay Borisov
Signed-off-by: David S. Miller
---
 include/net/netns/ipv4.h   |  1 +
 include/net/tcp.h          |  5 +++--
 net/ipv4/sysctl_net_ipv4.c | 14 +++++++-------
 net/ipv4/tcp_ipv4.c        |  1 +
 net/ipv4/tcp_timer.c       |  1 -
 5 files changed, 12 insertions(+), 10 deletions(-)

(limited to 'include/net/tcp.h')

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 9e9bbebaebd1..6e26ea2d0374 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -95,6 +95,7 @@ struct netns_ipv4 {
 	u32 sysctl_tcp_probe_interval;

 	int sysctl_tcp_keepalive_time;
+	int sysctl_tcp_keepalive_probes;

 	struct ping_group_range ping_group_range;

diff --git a/include/net/tcp.h b/include/net/tcp.h
index cb4d4cf25744..0646521400bf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -240,7 +240,6 @@ extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
 extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_keepalive_probes;
 extern int sysctl_tcp_keepalive_intvl;
 extern int sysctl_tcp_syn_retries;
 extern int sysctl_tcp_synack_retries;
@@ -1236,7 +1235,9 @@ static inline int keepalive_time_when(const struct tcp_sock *tp)

 static inline int keepalive_probes(const struct tcp_sock *tp)
 {
-	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
+	struct net *net = sock_net((struct sock *)tp);
+
+	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
 }

 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)

diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1886cc842871..e99fbb77dba7 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -336,13 +336,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
-	{
-		.procname	= "tcp_keepalive_probes",
-		.data		= &sysctl_tcp_keepalive_probes,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
 	{
 		.procname	= "tcp_keepalive_intvl",
 		.data		= &sysctl_tcp_keepalive_intvl,
@@ -961,6 +954,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
+	{
+		.procname	= "tcp_keepalive_probes",
+		.data		= &init_net.ipv4.sysctl_tcp_keepalive_probes,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6e14ff9a8580..ed98de85871e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2386,6 +2386,7 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
+	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;

 	return 0;
 fail:

diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 166f27b43cc0..0ccb120d591a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -24,7 +24,6 @@

 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
-int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
 int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
 int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
--
cgit v1.2.3
From b840d15d39128d08ed4486085e5507d2617b9ae1 Mon Sep 17 00:00:00 2001
From: Nikolay Borisov
Date: Thu, 7 Jan 2016 16:38:45 +0200
Subject: ipv4: Namespaceify the tcp_keepalive_intvl sysctl knob

This is the final part required to namespaceify the tcp keepalive
mechanism.

Signed-off-by: Nikolay Borisov
Signed-off-by: David S. Miller
---
 include/net/netns/ipv4.h   |  1 +
 include/net/tcp.h          |  5 +++--
 net/ipv4/sysctl_net_ipv4.c | 14 +++++++-------
 net/ipv4/tcp_ipv4.c        |  1 +
 net/ipv4/tcp_timer.c       |  1 -
 5 files changed, 12 insertions(+), 10 deletions(-)

(limited to 'include/net/tcp.h')

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 6e26ea2d0374..2b7907a35568 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -96,6 +96,7 @@ struct netns_ipv4 {
 	u32 sysctl_tcp_probe_interval;

 	int sysctl_tcp_keepalive_time;
 	int sysctl_tcp_keepalive_probes;
+	int sysctl_tcp_keepalive_intvl;

 	struct ping_group_range ping_group_range;

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0646521400bf..a80255f4ca33 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -240,7 +240,6 @@ extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
 extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_keepalive_intvl;
 extern int sysctl_tcp_syn_retries;
 extern int sysctl_tcp_synack_retries;
 extern int sysctl_tcp_retries1;
@@ -1222,7 +1222,9 @@ void tcp_enter_memory_pressure(struct sock *sk);

 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
-	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
+	struct net *net = sock_net((struct sock *)tp);
+
+	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
 }

 static inline int keepalive_time_when(const struct tcp_sock *tp)

diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index e99fbb77dba7..46ce410703b1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -336,13 +336,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
-	{
-		.procname	= "tcp_keepalive_intvl",
-		.data		= &sysctl_tcp_keepalive_intvl,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
 	{
 		.procname	= "tcp_retries1",
 		.data		= &sysctl_tcp_retries1,
@@ -961,6 +954,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "tcp_keepalive_intvl",
+		.data		= &init_net.ipv4.sysctl_tcp_keepalive_intvl,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
 	{ }
 };

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ed98de85871e..65947c1f4733 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2387,6 +2387,7 @@ static int __net_init tcp_sk_init(struct net *net)

 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
 	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
+	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

 	return 0;
 fail:

diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0ccb120d591a..a4730a28b220 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -24,7 +24,6 @@

 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
-int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
 int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
 int sysctl_tcp_orphan_retries __read_mostly;
--
cgit v1.2.3
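With all three knobs per-namespace after this trio of patches, the per-socket overrides still take precedence: keepalive_time_when() and friends return tp->keepalive_* when set and only fall back to net->ipv4.sysctl_tcp_keepalive_*. A minimal user-space sketch of that override path (timing values are illustrative):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	/* turn keepalive on; timing comes from the knobs below */
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	/* overrides net.ipv4.tcp_keepalive_time for this socket */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	/* overrides net.ipv4.tcp_keepalive_intvl */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	/* overrides net.ipv4.tcp_keepalive_probes */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));

	printf("keepalive: idle=%ds intvl=%ds probes=%d\n", idle, intvl, cnt);
	return 0;
}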
From e805605c721021879a1469bdae45c6f80bc985f4 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Thu, 14 Jan 2016 15:21:14 -0800
Subject: net: tcp_memcontrol: sanitize tcp memory accounting callbacks

There won't be a tcp control soft limit, so integrating the memcg code
into the global skmem limiting scheme complicates things unnecessarily.
Replace this with simple and clear charge and uncharge calls--hidden
behind a jump label--to account skb memory.

Note that this is not purely aesthetic: as a result of shoehorning the
per-memcg code into the same memory accounting functions that handle
the global level, the old code would compare the per-memcg consumption
against the smaller of the per-memcg limit and the global limit. This
allowed the total consumption of multiple sockets to exceed the global
limit, as long as the individual sockets stayed within bounds. After
this change, the code will always compare the per-memcg consumption to
the per-memcg limit, and the global consumption to the global limit,
and thus close this loophole.

Without a soft limit, the per-memcg memory pressure state in sockets is
generally questionable. However, we did it until now, so we continue to
enter it when the hard limit is hit, and packets are dropped, to let
other sockets in the cgroup know that they shouldn't grow their
transmit windows, either. However, keep it simple in the new callback
model and leave memory pressure lazily when the next packet is accepted
(as opposed to doing it synchronously when packets are processed). When
packets are dropped, network performance will already be in the toilet,
so that should be a reasonable trade-off.

As described above, consumption is now checked on the per-memcg level
and the global level separately. Likewise, memory pressure states are
maintained on both the per-memcg level and the global level, and a
socket is considered under pressure when either level asserts as much.

Signed-off-by: Johannes Weiner
Reviewed-by: Vladimir Davydov
Acked-by: David S. Miller
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memcontrol.h | 19 +++++++++----
 include/net/sock.h         | 64 ++++++----------------------------------
 include/net/tcp.h          |  5 ++--
 mm/memcontrol.c            | 32 +++++++++++++++++++++++
 net/core/sock.c            | 26 +++++++++++--------
 net/ipv4/tcp_output.c      |  7 +++--
 6 files changed, 77 insertions(+), 76 deletions(-)

(limited to 'include/net/tcp.h')

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c91c1b73951..e4e77bd1dd39 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -660,12 +660,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 }
 #endif /* CONFIG_MEMCG */

-enum {
-	UNDER_LIMIT,
-	SOFT_LIMIT,
-	OVER_LIMIT,
-};
-
 #ifdef CONFIG_CGROUP_WRITEBACK

 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
@@ -694,6 +688,19 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 struct sock;
 void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
+bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
+static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+{
+	return proto->memory_pressure;
+}
+#else
+static inline bool mem_cgroup_under_pressure(struct cg_proto *proto)
+{
+	return false;
+}
+#endif

 #ifdef CONFIG_MEMCG_KMEM
 extern struct static_key memcg_kmem_enabled_key;

diff --git a/include/net/sock.h b/include/net/sock.h
index 8b1f8e5d3a48..94a6c1a740b9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1129,8 +1129,9 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 	if (!sk->sk_prot->memory_pressure)
 		return false;

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!sk->sk_cgrp->memory_pressure;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+		return true;

 	return !!*sk->sk_prot->memory_pressure;
 }
@@ -1144,9 +1145,6 @@ static inline void sk_leave_memory_pressure(struct sock *sk)

 		if (*memory_pressure)
 			*memory_pressure = 0;
-
-		if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-			sk->sk_cgrp->memory_pressure = 0;
 	}
 }

 static inline void sk_enter_memory_pressure(struct sock *sk)
@@ -1154,76 +1152,30 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
 {
 	if (!sk->sk_prot->enter_memory_pressure)
 		return;

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		sk->sk_cgrp->memory_pressure = 1;
-
 	sk->sk_prot->enter_memory_pressure(sk);
 }

 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
 {
-	long limit = sk->sk_prot->sysctl_mem[index];
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		limit = min_t(long, limit, sk->sk_cgrp->memory_allocated.limit);
-
-	return limit;
-}
-
-static inline void memcg_memory_allocated_add(struct cg_proto *prot,
-					      unsigned long amt,
-					      int *parent_status)
-{
-	struct page_counter *counter;
-
-	if (page_counter_try_charge(&prot->memory_allocated, amt, &counter))
-		return;
-
-	page_counter_charge(&prot->memory_allocated, amt);
-	*parent_status = OVER_LIMIT;
-}
-
-static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
-					      unsigned long amt)
-{
-	page_counter_uncharge(&prot->memory_allocated, amt);
+	return sk->sk_prot->sysctl_mem[index];
 }

 static inline long sk_memory_allocated(const struct sock *sk)
 {
-	struct proto *prot = sk->sk_prot;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return page_counter_read(&sk->sk_cgrp->memory_allocated);
-
-	return atomic_long_read(prot->memory_allocated);
+	return atomic_long_read(sk->sk_prot->memory_allocated);
 }

 static inline long
-sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+sk_memory_allocated_add(struct sock *sk, int amt)
 {
-	struct proto *prot = sk->sk_prot;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
-		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
-		/* update the root cgroup regardless */
-		atomic_long_add_return(amt, prot->memory_allocated);
-		return page_counter_read(&sk->sk_cgrp->memory_allocated);
-	}
-
-	return atomic_long_add_return(amt, prot->memory_allocated);
+	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
 }

 static inline void
 sk_memory_allocated_sub(struct sock *sk, int amt)
 {
-	struct proto *prot = sk->sk_prot;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		memcg_memory_allocated_sub(sk->sk_cgrp, amt);
-
-	atomic_long_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
 }

 static inline void sk_sockets_allocated_dec(struct sock *sk)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index a80255f4ca33..d9df80deba31 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -289,8 +289,9 @@ extern int tcp_memory_pressure;
 /* optimized version of sk_under_memory_pressure() for TCP sockets */
 static inline bool tcp_under_memory_pressure(const struct sock *sk)
 {
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!sk->sk_cgrp->memory_pressure;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+		return true;

 	return tcp_memory_pressure;
 }

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d9344dad207e..f5de783860b8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -338,6 +338,38 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(tcp_proto_cgroup);

+/**
+ * mem_cgroup_charge_skmem - charge socket memory
+ * @proto: proto to charge
+ * @nr_pages: number of pages to charge
+ *
+ * Charges @nr_pages to @proto. Returns %true if the charge fit within
+ * @proto's configured limit, %false if the charge had to be forced.
+ */
+bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+{
+	struct page_counter *counter;
+
+	if (page_counter_try_charge(&proto->memory_allocated,
+				    nr_pages, &counter)) {
+		proto->memory_pressure = 0;
+		return true;
+	}
+	page_counter_charge(&proto->memory_allocated, nr_pages);
+	proto->memory_pressure = 1;
+	return false;
+}
+
+/**
+ * mem_cgroup_uncharge_skmem - uncharge socket memory
+ * @proto - proto to uncharge
+ * @nr_pages - number of pages to uncharge
+ */
+void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+{
+	page_counter_uncharge(&proto->memory_allocated, nr_pages);
+}
+
 #endif

 #ifdef CONFIG_MEMCG_KMEM

diff --git a/net/core/sock.c b/net/core/sock.c
index 6c5dab01105b..89ae859d2dc5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2084,27 +2084,27 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
 	long allocated;
-	int parent_status = UNDER_LIMIT;

 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

-	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
+	allocated = sk_memory_allocated_add(sk, amt);
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+	    !mem_cgroup_charge_skmem(sk->sk_cgrp, amt))
+		goto suppress_allocation;

 	/* Under limit. */
-	if (parent_status == UNDER_LIMIT &&
-	    allocated <= sk_prot_mem_limits(sk, 0)) {
+	if (allocated <= sk_prot_mem_limits(sk, 0)) {
 		sk_leave_memory_pressure(sk);
 		return 1;
 	}

-	/* Under pressure. (we or our parents) */
-	if ((parent_status > SOFT_LIMIT) ||
-	    allocated > sk_prot_mem_limits(sk, 1))
+	/* Under pressure. */
+	if (allocated > sk_prot_mem_limits(sk, 1))
 		sk_enter_memory_pressure(sk);

-	/* Over hard limit (we or our parents) */
-	if ((parent_status == OVER_LIMIT) ||
-	    (allocated > sk_prot_mem_limits(sk, 2)))
+	/* Over hard limit. */
+	if (allocated > sk_prot_mem_limits(sk, 2))
 		goto suppress_allocation;

 	/* guarantee minimum buffer size under pressure */
@@ -2153,6 +2153,9 @@ suppress_allocation:

 	sk_memory_allocated_sub(sk, amt);

+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		mem_cgroup_uncharge_skmem(sk->sk_cgrp, amt);
+
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -2168,6 +2171,9 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
 	sk_memory_allocated_sub(sk, amount);
 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;

+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		mem_cgroup_uncharge_skmem(sk->sk_cgrp, amount);
+
 	if (sk_under_memory_pressure(sk) &&
 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
 		sk_leave_memory_pressure(sk);

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 412a920fe0ec..493b48945f0c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2813,13 +2813,16 @@ begin_fwd:
  */
 void sk_forced_mem_schedule(struct sock *sk, int size)
 {
-	int amt, status;
+	int amt;

 	if (size <= sk->sk_forward_alloc)
 		return;
 	amt = sk_mem_pages(size);
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	sk_memory_allocated_add(sk, amt, &status);
+	sk_memory_allocated_add(sk, amt);
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		mem_cgroup_charge_skmem(sk->sk_cgrp, amt);
 }

 /* Send a FIN. The caller locks the socket for us.
--
cgit v1.2.3
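The loophole described in this commit message can be made concrete with a small standalone model (ordinary C, not kernel code; the limits are made-up numbers). Under the old scheme a memcg-bound socket was only ever checked against min(memcg limit, global limit), so several cgroups could together blow past the global limit; the new scheme checks each level independently.

#include <stdio.h>

#define GLOBAL_LIMIT	100	/* pages, illustrative */
#define MEMCG_LIMIT	60	/* pages, illustrative */

static int old_ok(long memcg_usage)
{
	/* old: per-memcg usage vs. the smaller of the two limits */
	long limit = MEMCG_LIMIT < GLOBAL_LIMIT ? MEMCG_LIMIT : GLOBAL_LIMIT;

	return memcg_usage <= limit;
}

static int new_ok(long memcg_usage, long global_usage)
{
	/* new: each level is checked against its own limit */
	return memcg_usage <= MEMCG_LIMIT && global_usage <= GLOBAL_LIMIT;
}

int main(void)
{
	/* three cgroups, each within its 60-page limit... */
	long per_memcg = 50, global = 3 * per_memcg;	/* 150 > 100 */

	printf("old scheme allows each charge: %d (global total %ld)\n",
	       old_ok(per_memcg), global);
	printf("new scheme allows the charge:  %d\n",
	       new_ok(per_memcg, global));
	return 0;
}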
From baac50bbc3cdfd184ebf586b1704edbfcee866df Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Thu, 14 Jan 2016 15:21:17 -0800
Subject: net: tcp_memcontrol: simplify linkage between socket and page counter

There won't be any separate counters for socket memory consumed by
protocols other than TCP in the future. Remove the indirection and
link sockets directly to their owning memory cgroup.

Signed-off-by: Johannes Weiner
Reviewed-by: Vladimir Davydov
Acked-by: David S. Miller
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memcontrol.h   | 20 ++++---------
 include/net/sock.h           | 25 +++--------------
 include/net/tcp.h            |  4 +--
 include/net/tcp_memcontrol.h |  1 -
 mm/memcontrol.c              | 57 +++++++++++++++----------------------
 net/core/sock.c              | 52 +++++-----------------------------
 net/ipv4/tcp_ipv4.c          |  7 +----
 net/ipv4/tcp_memcontrol.c    | 67 +++++++++++++++++---------------------------
 net/ipv4/tcp_output.c        |  4 +--
 net/ipv6/tcp_ipv6.c          |  3 ---
 10 files changed, 69 insertions(+), 171 deletions(-)

(limited to 'include/net/tcp.h')

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e4e77bd1dd39..7c085e4636ba 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -89,16 +89,6 @@ struct cg_proto {
 	struct page_counter	memory_allocated;	/* Current allocated memory. */
 	int			memory_pressure;
 	bool			active;
-	/*
-	 * memcg field is used to find which memcg we belong directly
-	 * Each memcg struct can hold more than one cg_proto, so container_of
-	 * won't really cut.
-	 *
-	 * The elegant solution would be having an inverse function to
-	 * proto_cgroup in struct proto, but that means polluting the structure
-	 * for everybody, instead of just for memcg users.
-	 */
-	struct mem_cgroup	*memcg;
 };

 #ifdef CONFIG_MEMCG
@@ -688,15 +678,15 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 struct sock;
 void sock_update_memcg(struct sock *sk);
 void sock_release_memcg(struct sock *sk);
-bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages);
-void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-static inline bool mem_cgroup_under_socket_pressure(struct cg_proto *proto)
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
-	return proto->memory_pressure;
+	return memcg->tcp_mem.memory_pressure;
 }
 #else
-static inline bool mem_cgroup_under_pressure(struct cg_proto *proto)
+static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
 	return false;
 }
 #endif

diff --git a/include/net/sock.h b/include/net/sock.h
index 94a6c1a740b9..be96a8dcbc74 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -71,22 +71,6 @@
 #include
 #include

-struct cgroup;
-struct cgroup_subsys;
-#ifdef CONFIG_NET
-int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
-void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
-#else
-static inline
-int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-	return 0;
-}
-static inline
-void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
-{
-}
-#endif
 /*
  * This structure really needs to be cleaned up.
  * Most of it is for TCP, and not used by any of
@@ -245,7 +229,6 @@ struct sock_common {
 	/* public: */
 };

-struct cg_proto;
 /**
  * struct sock - network layer representation of sockets
  * @__sk_common: shared layout with inet_timewait_sock
@@ -310,7 +293,7 @@ struct cg_proto;
  * @sk_security: used by security modules
  * @sk_mark: generic packet mark
  * @sk_cgrp_data: cgroup data for this cgroup
- * @sk_cgrp: this socket's cgroup-specific proto data
+ * @sk_memcg: this socket's memory cgroup association
  * @sk_write_pending: a write to stream socket waits to start
  * @sk_state_change: callback to indicate change in the state of the sock
  * @sk_data_ready: callback to indicate there is data to be processed
@@ -446,7 +429,7 @@ struct sock {
 	void			*sk_security;
 #endif
 	struct sock_cgroup_data	sk_cgrp_data;
-	struct cg_proto		*sk_cgrp;
+	struct mem_cgroup	*sk_memcg;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk);
 	void			(*sk_write_space)(struct sock *sk);
@@ -1129,8 +1112,8 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 	if (!sk->sk_prot->memory_pressure)
 		return false;

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;

 	return !!*sk->sk_prot->memory_pressure;

diff --git a/include/net/tcp.h b/include/net/tcp.h
index d9df80deba31..8ea19977ea53 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -289,8 +289,8 @@ extern int tcp_memory_pressure;
 /* optimized version of sk_under_memory_pressure() for TCP sockets */
 static inline bool tcp_under_memory_pressure(const struct sock *sk)
 {
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;

 	return tcp_memory_pressure;

diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
index 05b94d9453de..3a17b16ae8aa 100644
--- a/include/net/tcp_memcontrol.h
+++ b/include/net/tcp_memcontrol.h
@@ -1,7 +1,6 @@
 #ifndef _TCP_MEMCG_H
 #define _TCP_MEMCG_H

-struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg);
 #endif /* _TCP_MEMCG_H */

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f5de783860b8..eaaa86126277 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -294,9 +294,6 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 void sock_update_memcg(struct sock *sk)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;
-
-	BUG_ON(!sk->sk_prot->proto_cgroup);

 	/* Socket cloning can throw us here with sk_cgrp already
 	 * filled. It won't however, necessarily happen from
@@ -306,68 +303,58 @@ void sock_update_memcg(struct sock *sk)
 	 * Respecting the original socket's memcg is a better
 	 * decision in this case.
 	 */
-	if (sk->sk_cgrp) {
-		BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
-		css_get(&sk->sk_cgrp->memcg->css);
+	if (sk->sk_memcg) {
+		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
+		css_get(&sk->sk_memcg->css);
 		return;
 	}

 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(current);
-	cg_proto = sk->sk_prot->proto_cgroup(memcg);
-	if (cg_proto && cg_proto->active &&
-	    css_tryget_online(&memcg->css)) {
-		sk->sk_cgrp = cg_proto;
-	}
+	if (memcg != root_mem_cgroup &&
+	    memcg->tcp_mem.active &&
+	    css_tryget_online(&memcg->css))
+		sk->sk_memcg = memcg;
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(sock_update_memcg);

 void sock_release_memcg(struct sock *sk)
 {
-	WARN_ON(!sk->sk_cgrp->memcg);
-	css_put(&sk->sk_cgrp->memcg->css);
-}
-
-struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
-{
-	if (!memcg || mem_cgroup_is_root(memcg))
-		return NULL;
-
-	return &memcg->tcp_mem;
+	WARN_ON(!sk->sk_memcg);
+	css_put(&sk->sk_memcg->css);
 }
-EXPORT_SYMBOL(tcp_proto_cgroup);

 /**
  * mem_cgroup_charge_skmem - charge socket memory
- * @proto: proto to charge
+ * @memcg: memcg to charge
  * @nr_pages: number of pages to charge
  *
- * Charges @nr_pages to @proto. Returns %true if the charge fit within
- * @proto's configured limit, %false if the charge had to be forced.
+ * Charges @nr_pages to @memcg. Returns %true if the charge fit within
+ * @memcg's configured limit, %false if the charge had to be forced.
  */
-bool mem_cgroup_charge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct page_counter *counter;

-	if (page_counter_try_charge(&proto->memory_allocated,
+	if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
 				    nr_pages, &counter)) {
-		proto->memory_pressure = 0;
+		memcg->tcp_mem.memory_pressure = 0;
 		return true;
 	}
-	page_counter_charge(&proto->memory_allocated, nr_pages);
-	proto->memory_pressure = 1;
+	page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
+	memcg->tcp_mem.memory_pressure = 1;
 	return false;
 }

 /**
  * mem_cgroup_uncharge_skmem - uncharge socket memory
- * @proto - proto to uncharge
+ * @memcg - memcg to uncharge
  * @nr_pages - number of pages to uncharge
  */
-void mem_cgroup_uncharge_skmem(struct cg_proto *proto, unsigned int nr_pages)
+void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	page_counter_uncharge(&proto->memory_allocated, nr_pages);
+	page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
 }

 #endif
@@ -3653,7 +3640,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	if (ret)
 		return ret;

-	return mem_cgroup_sockets_init(memcg, ss);
+	return tcp_init_cgroup(memcg, ss);
 }

 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
@@ -3709,7 +3696,7 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 		static_key_slow_dec(&memcg_kmem_enabled_key);
 		WARN_ON(page_counter_read(&memcg->kmem));
 	}
-	mem_cgroup_sockets_destroy(memcg);
+	tcp_destroy_cgroup(memcg);
 }
 #else
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)

diff --git a/net/core/sock.c b/net/core/sock.c
index 89ae859d2dc5..3535bffa45f3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -195,44 +195,6 @@ bool sk_net_capable(const struct sock *sk, int cap)
 }
 EXPORT_SYMBOL(sk_net_capable);

-
-#ifdef CONFIG_MEMCG_KMEM
-int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-	struct proto *proto;
-	int ret = 0;
-
-	mutex_lock(&proto_list_mutex);
-	list_for_each_entry(proto, &proto_list, node) {
-		if (proto->init_cgroup) {
-			ret = proto->init_cgroup(memcg, ss);
-			if (ret)
-				goto out;
-		}
-	}
-
-	mutex_unlock(&proto_list_mutex);
-	return ret;
-out:
-	list_for_each_entry_continue_reverse(proto, &proto_list, node)
-		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(memcg);
-	mutex_unlock(&proto_list_mutex);
-	return ret;
-}
-
-void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
-{
-	struct proto *proto;
-
-	mutex_lock(&proto_list_mutex);
-	list_for_each_entry_reverse(proto, &proto_list, node)
-		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(memcg);
-	mutex_unlock(&proto_list_mutex);
-}
-#endif
-
 /*
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
@@ -1601,7 +1563,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		sk_set_socket(newsk, NULL);
 		newsk->sk_wq = NULL;

-		if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 			sock_update_memcg(newsk);

 		if (newsk->sk_prot->sockets_allocated)
@@ -2089,8 +2051,8 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)

 	allocated = sk_memory_allocated_add(sk, amt);

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-	    !mem_cgroup_charge_skmem(sk->sk_cgrp, amt))
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 		goto suppress_allocation;

 	/* Under limit. */
@@ -2153,8 +2115,8 @@ suppress_allocation:

 	sk_memory_allocated_sub(sk, amt);

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		mem_cgroup_uncharge_skmem(sk->sk_cgrp, amt);
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);

 	return 0;
 }
@@ -2171,8 +2133,8 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
 	sk_memory_allocated_sub(sk, amount);
 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		mem_cgroup_uncharge_skmem(sk->sk_cgrp, amount);
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);

 	if (sk_under_memory_pressure(sk) &&
 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index eb39e02899e5..c7d1fb50f381 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1819,7 +1819,7 @@ void tcp_v4_destroy_sock(struct sock *sk)

 	sk_sockets_allocated_dec(sk);

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 		sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -2343,11 +2343,6 @@ struct proto tcp_prot = {
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
-#endif
-#ifdef CONFIG_MEMCG_KMEM
-	.init_cgroup		= tcp_init_cgroup,
-	.destroy_cgroup		= tcp_destroy_cgroup,
-	.proto_cgroup		= tcp_proto_cgroup,
 #endif
 	.diag_destroy		= tcp_abort,
 };

diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index ef4268d12e43..e5078259cbe3 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -8,60 +8,47 @@

 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+	struct page_counter *counter_parent = NULL;
 	/*
 	 * The root cgroup does not use page_counters, but rather,
 	 * rely on the data already collected by the network
 	 * subsystem
 	 */
-	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-	struct page_counter *counter_parent = NULL;
-	struct cg_proto *cg_proto, *parent_cg;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return 0;

-	cg_proto->memory_pressure = 0;
-	cg_proto->memcg = memcg;
+	memcg->tcp_mem.memory_pressure = 0;

-	parent_cg = tcp_prot.proto_cgroup(parent);
-	if (parent_cg)
-		counter_parent = &parent_cg->memory_allocated;
+	if (parent)
+		counter_parent = &parent->tcp_mem.memory_allocated;

-	page_counter_init(&cg_proto->memory_allocated, counter_parent);
+	page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);

 	return 0;
 }
-EXPORT_SYMBOL(tcp_init_cgroup);

 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return;

-	if (cg_proto->active)
+	if (memcg->tcp_mem.active)
 		static_key_slow_dec(&memcg_socket_limit_enabled);
-
 }
-EXPORT_SYMBOL(tcp_destroy_cgroup);

 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
-	struct cg_proto *cg_proto;
 	int ret;

-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return -EINVAL;

-	ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);
+	ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
 	if (ret)
 		return ret;

-	if (!cg_proto->active) {
+	if (!memcg->tcp_mem.active) {
 		/*
 		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
@@ -79,7 +66,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 		 * patched in yet.
 		 */
 		static_key_slow_inc(&memcg_socket_limit_enabled);
-		cg_proto->active = true;
+		memcg->tcp_mem.active = true;
 	}

 	return 0;
@@ -123,32 +110,32 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
 	u64 val;

 	switch (cft->private) {
 	case RES_LIMIT:
-		if (!cg_proto)
-			return PAGE_COUNTER_MAX;
-		val = cg_proto->memory_allocated.limit;
+		if (memcg == root_mem_cgroup)
+			val = PAGE_COUNTER_MAX;
+		else
+			val = memcg->tcp_mem.memory_allocated.limit;
 		val *= PAGE_SIZE;
 		break;
 	case RES_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			val = atomic_long_read(&tcp_memory_allocated);
 		else
-			val = page_counter_read(&cg_proto->memory_allocated);
+			val = page_counter_read(&memcg->tcp_mem.memory_allocated);
 		val *= PAGE_SIZE;
 		break;
 	case RES_FAILCNT:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.failcnt;
+		val = memcg->tcp_mem.memory_allocated.failcnt;
 		break;
 	case RES_MAX_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.watermark;
+		val = memcg->tcp_mem.memory_allocated.watermark;
 		val *= PAGE_SIZE;
 		break;
 	default:
@@ -161,19 +148,17 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;

 	memcg = mem_cgroup_from_css(of_css(of));
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return nbytes;

 	switch (of_cft(of)->private) {
 	case RES_MAX_USAGE:
-		page_counter_reset_watermark(&cg_proto->memory_allocated);
+		page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
 		break;
 	case RES_FAILCNT:
-		cg_proto->memory_allocated.failcnt = 0;
+		memcg->tcp_mem.memory_allocated.failcnt = 0;
 		break;
 	}

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 493b48945f0c..fda379cd600d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2821,8 +2821,8 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
 	sk_memory_allocated_add(sk, amt);

-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		mem_cgroup_charge_skmem(sk->sk_cgrp, amt);
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
 }

 /* Send a FIN. The caller locks the socket for us.

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index db9f1c318afc..4ad8edb46f7c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1888,9 +1888,6 @@ struct proto tcpv6_prot = {
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
-#endif
-#ifdef CONFIG_MEMCG_KMEM
-	.proto_cgroup		= tcp_proto_cgroup,
 #endif
 	.clear_sk		= tcp_v6_clear_sk,
 	.diag_destroy		= tcp_abort,
--
cgit v1.2.3
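After this series the per-memcg TCP accounting is still steered through the cgroup v1 files that tcp_memcontrol.c implements (RES_LIMIT, RES_USAGE, RES_FAILCNT, RES_MAX_USAGE above). A hedged sketch of poking them from user space, assuming CONFIG_MEMCG_KMEM, a v1 memory hierarchy mounted at /sys/fs/cgroup/memory, and a hypothetical cgroup named "demo"; run as root.

#include <stdio.h>

#define CG "/sys/fs/cgroup/memory/demo"

static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

static void print_file(const char *path)
{
	char buf[64] = "";
	FILE *f = fopen(path, "r");

	if (f) {
		fgets(buf, sizeof(buf), f);
		fclose(f);
	}
	printf("%-50s %s", path, buf);
}

int main(void)
{
	/* RES_LIMIT: setting a limit also flips memcg->tcp_mem.active */
	write_file(CG "/memory.kmem.tcp.limit_in_bytes", "1048576\n");

	print_file(CG "/memory.kmem.tcp.limit_in_bytes");	/* RES_LIMIT */
	print_file(CG "/memory.kmem.tcp.usage_in_bytes");	/* RES_USAGE */
	print_file(CG "/memory.kmem.tcp.failcnt");		/* RES_FAILCNT */
	print_file(CG "/memory.kmem.tcp.max_usage_in_bytes");	/* RES_MAX_USAGE */
	return 0;
}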