Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_fd.c                 2
-rw-r--r--  net/ceph/Kconfig                  2
-rw-r--r--  net/ceph/ceph_hash.c              2
-rw-r--r--  net/ceph/crush/hash.c             2
-rw-r--r--  net/ceph/crush/mapper.c           2
-rw-r--r--  net/ceph/debugfs.c                3
-rw-r--r--  net/ceph/osd_client.c            43
-rw-r--r--  net/core/bpf_sk_storage.c        37
-rw-r--r--  net/core/net-sysfs.c             12
-rw-r--r--  net/core/skbuff.c                 2
-rw-r--r--  net/core/sock.c                  25
-rw-r--r--  net/ipv4/bpfilter/sockopt.c      14
-rw-r--r--  net/ipv4/inet_connection_sock.c  97
-rw-r--r--  net/ipv4/inet_hashtables.c        1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c       16
-rw-r--r--  net/ipv4/tcp.c                   16
-rw-r--r--  net/ipv4/tcp_fastopen.c          23
-rw-r--r--  net/mptcp/subflow.c               6
-rw-r--r--  net/nfc/rawsock.c                 7
-rw-r--r--  net/openvswitch/datapath.c       10
-rw-r--r--  net/openvswitch/flow_table.c     35
-rw-r--r--  net/openvswitch/flow_table.h      3
-rw-r--r--  net/packet/af_packet.c            9
-rw-r--r--  net/socket.c                     23
-rw-r--r--  net/sunrpc/rpc_pipe.c             2
-rw-r--r--  net/sunrpc/xprt.c                 9
-rw-r--r--  net/tls/tls_device.c              3
-rw-r--r--  net/tls/tls_sw.c                  3
-rw-r--r--  net/vmw_vsock/af_vsock.c          2
29 files changed, 233 insertions(+), 178 deletions(-)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 12ecacf0c55f..c0762a302162 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -950,7 +950,7 @@ static int p9_bind_privport(struct socket *sock)
memset(&cl, 0, sizeof(cl));
cl.sin_family = AF_INET;
- cl.sin_addr.s_addr = INADDR_ANY;
+ cl.sin_addr.s_addr = htonl(INADDR_ANY);
for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
cl.sin_port = htons((ushort)port);
err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
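
The conversion above is behaviorally a no-op here (INADDR_ANY is all-zeros), but struct sockaddr_in fields hold network-byte-order values, so spelling out htonl() keeps the code correct for any other INADDR_* constant and quiet under endianness checkers. A minimal sketch of the corrected pattern (values illustrative):

    struct sockaddr_in cl;

    memset(&cl, 0, sizeof(cl));
    cl.sin_family = AF_INET;
    cl.sin_addr.s_addr = htonl(INADDR_ANY);  /* address in network order */
    cl.sin_port = htons(1023);               /* ports are converted too */
    err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));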
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index d7bec7adc267..f36f9a3a4e20 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -13,7 +13,7 @@ config CEPH_LIB
common functionality to both the Ceph filesystem and
to the rados block device (rbd).
- More information at http://ceph.newdream.net/.
+ More information at https://ceph.io/.
If unsure, say N.
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 9a5850f264ed..81e1e006c540 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -4,7 +4,7 @@
/*
* Robert Jenkin's hash function.
- * http://burtleburtle.net/bob/hash/evahash.html
+ * https://burtleburtle.net/bob/hash/evahash.html
* This is in the public domain.
*/
#define mix(a, b, c) \
diff --git a/net/ceph/crush/hash.c b/net/ceph/crush/hash.c
index e5cc603cdb17..fe79f6d2d0db 100644
--- a/net/ceph/crush/hash.c
+++ b/net/ceph/crush/hash.c
@@ -7,7 +7,7 @@
/*
* Robert Jenkins' function for mixing 32-bit values
- * http://burtleburtle.net/bob/hash/evahash.html
+ * https://burtleburtle.net/bob/hash/evahash.html
* a, b = random bits, c = input and output
*/
#define crush_hashmix(a, b, c) do { \
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 3f323ed9df52..07e5614eb3f1 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -298,7 +298,7 @@ static __u64 crush_ln(unsigned int xin)
*
* for reference, see:
*
- * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
+ * https://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
*
*/
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 409d505ff320..2110439f8a24 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -223,6 +223,9 @@ static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
if (op->op == CEPH_OSD_OP_WATCH)
seq_printf(s, "-%s",
ceph_osd_watch_op_name(op->watch.op));
+ else if (op->op == CEPH_OSD_OP_CALL)
+ seq_printf(s, "-%s/%s", op->cls.class_name,
+ op->cls.method_name);
}
seq_putc(s, '\n');
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 2db8b44e70c2..e4fbcad6e7d8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
- /* req only, each op is zeroed in _osd_req_op_init() */
+ /* req only, each op is zeroed in osd_req_op_init() */
memset(req, 0, sizeof(*req));
kref_init(&req->r_kref);
@@ -746,8 +746,8 @@ EXPORT_SYMBOL(ceph_osdc_alloc_messages);
* other information associated with them. It also serves as a
* common init routine for all the other init functions, below.
*/
-static struct ceph_osd_req_op *
-_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
+struct ceph_osd_req_op *
+osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, u32 flags)
{
struct ceph_osd_req_op *op;
@@ -762,12 +762,6 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
return op;
}
-
-void osd_req_op_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode, u32 flags)
-{
- (void)_osd_req_op_init(osd_req, which, opcode, flags);
-}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
@@ -775,8 +769,8 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
u64 offset, u64 length,
u64 truncate_size, u32 truncate_seq)
{
- struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
- opcode, 0);
+ struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
+ opcode, 0);
size_t payload_len = 0;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
@@ -822,7 +816,7 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
BUG_ON(which + 1 >= osd_req->r_num_ops);
prev_op = &osd_req->r_ops[which];
- op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
+ op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
/* dup previous one */
op->indata_len = prev_op->indata_len;
op->outdata_len = prev_op->outdata_len;
@@ -845,7 +839,7 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
size_t size;
int ret;
- op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
+ op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
pagelist = ceph_pagelist_alloc(GFP_NOFS);
if (!pagelist)
@@ -883,8 +877,8 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode)
{
- struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
- opcode, 0);
+ struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
+ opcode, 0);
struct ceph_pagelist *pagelist;
size_t payload_len;
int ret;
@@ -928,7 +922,7 @@ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
{
struct ceph_osd_req_op *op;
- op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
+ op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
op->watch.cookie = cookie;
op->watch.op = watch_opcode;
op->watch.gen = 0;
@@ -943,10 +937,9 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
u64 expected_write_size,
u32 flags)
{
- struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
- CEPH_OSD_OP_SETALLOCHINT,
- 0);
+ struct ceph_osd_req_op *op;
+ op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0);
op->alloc_hint.expected_object_size = expected_object_size;
op->alloc_hint.expected_write_size = expected_write_size;
op->alloc_hint.flags = flags;
@@ -3076,9 +3069,7 @@ static void send_linger(struct ceph_osd_linger_request *lreq)
cancel_linger_request(req);
request_reinit(req);
- ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
- ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
- req->r_flags = lreq->t.flags;
+ target_copy(&req->r_t, &lreq->t);
req->r_mtime = lreq->mtime;
mutex_lock(&lreq->lock);
@@ -4801,7 +4792,7 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
struct ceph_pagelist *pl;
int ret;
- op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
+ op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
pl = ceph_pagelist_alloc(GFP_NOIO);
if (!pl)
@@ -4870,7 +4861,7 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
struct ceph_pagelist *pl;
int ret;
- op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
+ op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
op->notify.cookie = cookie;
pl = ceph_pagelist_alloc(GFP_NOIO);
@@ -5334,8 +5325,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
if (IS_ERR(pages))
return PTR_ERR(pages);
- op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
- dst_fadvise_flags);
+ op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
+ dst_fadvise_flags);
op->copy_from.snapid = src_snapid;
op->copy_from.src_version = src_version;
op->copy_from.flags = copy_from_flags;
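
With the separate void wrapper removed, osd_req_op_init() is exported directly and returns the op it initializes, so callers can set up an op slot and fill it in with one call. A short sketch of the calling convention (request and sizes are hypothetical):

    struct ceph_osd_req_op *op;

    /* zero-initialize op slot 0 of req and get a pointer to it */
    op = osd_req_op_init(req, 0, CEPH_OSD_OP_SETALLOCHINT, 0);
    op->alloc_hint.expected_object_size = 4 * 1024 * 1024;
    op->alloc_hint.expected_write_size = 4 * 1024 * 1024;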
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index d3377c90a291..b988f48153a4 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
return 0;
}
-static int bpf_iter_check_map(struct bpf_prog *prog,
- struct bpf_iter_aux_info *aux)
+static int bpf_iter_attach_map(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux)
{
- struct bpf_map *map = aux->map;
+ struct bpf_map *map;
+ int err = -EINVAL;
+
+ if (!linfo->map.map_fd)
+ return -EBADF;
+
+ map = bpf_map_get_with_uref(linfo->map.map_fd);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
- return -EINVAL;
+ goto put_map;
- if (prog->aux->max_rdonly_access > map->value_size)
- return -EACCES;
+ if (prog->aux->max_rdonly_access > map->value_size) {
+ err = -EACCES;
+ goto put_map;
+ }
+ aux->map = map;
return 0;
+
+put_map:
+ bpf_map_put_with_uref(map);
+ return err;
+}
+
+static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
+{
+ bpf_map_put_with_uref(aux->map);
}
static const struct seq_operations bpf_sk_storage_map_seq_ops = {
@@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
.target = "bpf_sk_storage_map",
- .check_target = bpf_iter_check_map,
- .req_linfo = BPF_ITER_LINK_MAP_FD,
+ .attach_target = bpf_iter_attach_map,
+ .detach_target = bpf_iter_detach_map,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
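
On the userspace side, the map to iterate over is now passed as a map fd in bpf_iter_link_info at link-creation time instead of through a separate link-update step. A sketch of how a libbpf caller might attach such an iterator (skeleton names are hypothetical; assumes libbpf's bpf_program__attach_iter()):

    union bpf_iter_link_info linfo;
    DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
    struct bpf_link *link;

    memset(&linfo, 0, sizeof(linfo));
    linfo.map.map_fd = bpf_map__fd(skel->maps.sk_storage_map);
    opts.link_info = &linfo;
    opts.link_info_len = sizeof(linfo);

    /* the kernel rejects a zero fd with -EBADF and a
     * non-BPF_MAP_TYPE_SK_STORAGE map with -EINVAL, as above */
    link = bpf_program__attach_iter(skel->progs.dump_sk_storage, &opts);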
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9de33b594ff2..efec66fa78b7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -757,11 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
return err;
}
- hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
- cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
- if (cpumask_empty(mask)) {
- free_cpumask_var(mask);
- return -EINVAL;
+ if (!cpumask_empty(mask)) {
+ hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+ cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+ if (cpumask_empty(mask)) {
+ free_cpumask_var(mask);
+ return -EINVAL;
+ }
}
map = kzalloc(max_t(unsigned int,
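
Note the behavioral effect: the requested mask is only intersected with the housekeeping CPUs when it is non-empty, so clearing RPS for a queue (writing 0 to /sys/class/net/<dev>/queues/rx-<n>/rps_cpus) succeeds again instead of returning -EINVAL on systems where all non-isolated CPUs have been excluded.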
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2828f6d5ba89..7e2e502ef519 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4853,7 +4853,7 @@ static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
if (err < 0)
goto out;
- if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+ if (ip_is_fragment(ip_hdr(skb)))
fragment = true;
off = ip_hdrlen(skb);
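
ip_is_fragment() performs exactly the replaced test; for reference, the helper in include/net/ip.h reads roughly:

    static inline bool ip_is_fragment(const struct iphdr *iph)
    {
        return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
    }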
diff --git a/net/core/sock.c b/net/core/sock.c
index a2044b4b606b..e4f40b175acb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3414,6 +3414,16 @@ static void sock_inuse_add(struct net *net, int val)
}
#endif
+static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
+{
+ if (!twsk_prot)
+ return;
+ kfree(twsk_prot->twsk_slab_name);
+ twsk_prot->twsk_slab_name = NULL;
+ kmem_cache_destroy(twsk_prot->twsk_slab);
+ twsk_prot->twsk_slab = NULL;
+}
+
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
if (!rsk_prot)
@@ -3484,7 +3494,7 @@ int proto_register(struct proto *prot, int alloc_slab)
prot->slab_flags,
NULL);
if (prot->twsk_prot->twsk_slab == NULL)
- goto out_free_timewait_sock_slab_name;
+ goto out_free_timewait_sock_slab;
}
}
@@ -3492,15 +3502,15 @@ int proto_register(struct proto *prot, int alloc_slab)
ret = assign_proto_idx(prot);
if (ret) {
mutex_unlock(&proto_list_mutex);
- goto out_free_timewait_sock_slab_name;
+ goto out_free_timewait_sock_slab;
}
list_add(&prot->node, &proto_list);
mutex_unlock(&proto_list_mutex);
return ret;
-out_free_timewait_sock_slab_name:
+out_free_timewait_sock_slab:
if (alloc_slab && prot->twsk_prot)
- kfree(prot->twsk_prot->twsk_slab_name);
+ tw_prot_cleanup(prot->twsk_prot);
out_free_request_sock_slab:
if (alloc_slab) {
req_prot_cleanup(prot->rsk_prot);
@@ -3524,12 +3534,7 @@ void proto_unregister(struct proto *prot)
prot->slab = NULL;
req_prot_cleanup(prot->rsk_prot);
-
- if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
- kmem_cache_destroy(prot->twsk_prot->twsk_slab);
- kfree(prot->twsk_prot->twsk_slab_name);
- prot->twsk_prot->twsk_slab = NULL;
- }
+ tw_prot_cleanup(prot->twsk_prot);
}
EXPORT_SYMBOL(proto_unregister);
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c
index 545b2640f019..1b34cb9a7708 100644
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -57,18 +57,16 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
return bpfilter_mbox_request(sk, optname, optval, optlen, true);
}
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname,
- char __user *user_optval, int __user *optlen)
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen)
{
- sockptr_t optval;
- int err, len;
+ int len;
if (get_user(len, optlen))
return -EFAULT;
- err = init_user_sockptr(&optval, user_optval, len);
- if (err)
- return err;
- return bpfilter_mbox_request(sk, optname, optval, len, false);
+
+ return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len,
+ false);
}
static int __init bpfilter_sockopt_init(void)
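
USER_SOCKPTR() just tags the user pointer inside a sockptr_t; the pointer is validated later, when it is actually copied, which is what lets the init_user_sockptr() call and its error path go away. The wrapper in include/linux/sockptr.h is approximately:

    static inline sockptr_t USER_SOCKPTR(void __user *p)
    {
        return (sockptr_t) { .user = p };
    }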
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d1a3913eebe0..b457dd2d6c75 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -296,6 +296,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
ipv6_only_sock(sk), true, false);
}
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk)
+{
+ kuid_t uid = sock_i_uid(sk);
+ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+
+ if (hlist_empty(&tb->owners)) {
+ tb->fastreuse = reuse;
+ if (sk->sk_reuseport) {
+ tb->fastreuseport = FASTREUSEPORT_ANY;
+ tb->fastuid = uid;
+ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+ tb->fast_ipv6_only = ipv6_only_sock(sk);
+ tb->fast_sk_family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+ } else {
+ tb->fastreuseport = 0;
+ }
+ } else {
+ if (!reuse)
+ tb->fastreuse = 0;
+ if (sk->sk_reuseport) {
+ /* We didn't match or we don't have fastreuseport set on
+ * the tb, but we have sk_reuseport set on this socket
+ * and we know that there are no bind conflicts with
+ * this socket in this tb, so reset our tb's reuseport
+ * settings so that any subsequent sockets that match
+ * our current socket will be put on the fast path.
+ *
+ * If we reset we need to set FASTREUSEPORT_STRICT so we
+ * do extra checking for all subsequent sk_reuseport
+ * socks.
+ */
+ if (!sk_reuseport_match(tb, sk)) {
+ tb->fastreuseport = FASTREUSEPORT_STRICT;
+ tb->fastuid = uid;
+ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+ tb->fast_ipv6_only = ipv6_only_sock(sk);
+ tb->fast_sk_family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+ }
+ } else {
+ tb->fastreuseport = 0;
+ }
+ }
+}
+
/* Obtain a reference to a local port for the given sock,
* if snum is zero it means select any available local port.
* We try to allocate an odd port (and leave even ports for connect())
@@ -308,7 +359,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
struct inet_bind_bucket *tb = NULL;
- kuid_t uid = sock_i_uid(sk);
int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk);
@@ -345,49 +395,8 @@ tb_found:
goto fail_unlock;
}
success:
- if (hlist_empty(&tb->owners)) {
- tb->fastreuse = reuse;
- if (sk->sk_reuseport) {
- tb->fastreuseport = FASTREUSEPORT_ANY;
- tb->fastuid = uid;
- tb->fast_rcv_saddr = sk->sk_rcv_saddr;
- tb->fast_ipv6_only = ipv6_only_sock(sk);
- tb->fast_sk_family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
- tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-#endif
- } else {
- tb->fastreuseport = 0;
- }
- } else {
- if (!reuse)
- tb->fastreuse = 0;
- if (sk->sk_reuseport) {
- /* We didn't match or we don't have fastreuseport set on
- * the tb, but we have sk_reuseport set on this socket
- * and we know that there are no bind conflicts with
- * this socket in this tb, so reset our tb's reuseport
- * settings so that any subsequent sockets that match
- * our current socket will be put on the fast path.
- *
- * If we reset we need to set FASTREUSEPORT_STRICT so we
- * do extra checking for all subsequent sk_reuseport
- * socks.
- */
- if (!sk_reuseport_match(tb, sk)) {
- tb->fastreuseport = FASTREUSEPORT_STRICT;
- tb->fastuid = uid;
- tb->fast_rcv_saddr = sk->sk_rcv_saddr;
- tb->fast_ipv6_only = ipv6_only_sock(sk);
- tb->fast_sk_family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
- tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-#endif
- }
- } else {
- tb->fastreuseport = 0;
- }
- }
+ inet_csk_update_fastreuse(tb, sk);
+
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, port);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 4eb4cd8d20dd..239e54474b65 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
return -ENOMEM;
}
}
+ inet_csk_update_fastreuse(tb, child);
}
inet_bind_hash(child, tb, port);
spin_unlock(&head->lock);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5653e3b011bf..54023a46db04 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -301,24 +301,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
2 * TCP_FASTOPEN_KEY_MAX) +
(TCP_FASTOPEN_KEY_MAX * 5)) };
- struct tcp_fastopen_context *ctx;
- u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
- __le32 key[TCP_FASTOPEN_KEY_MAX * 4];
+ u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
+ __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
char *backup_data;
- int ret, i = 0, off = 0, n_keys = 0;
+ int ret, i = 0, off = 0, n_keys;
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
if (!tbl.data)
return -ENOMEM;
- rcu_read_lock();
- ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
- if (ctx) {
- n_keys = tcp_fastopen_context_len(ctx);
- memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
- }
- rcu_read_unlock();
-
+ n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
if (!n_keys) {
memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
n_keys = 1;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c06d2bfd2ec4..31f3b858db81 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3685,22 +3685,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return 0;
case TCP_FASTOPEN_KEY: {
- __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
- struct tcp_fastopen_context *ctx;
- unsigned int key_len = 0;
+ u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
+ unsigned int key_len;
if (get_user(len, optlen))
return -EFAULT;
- rcu_read_lock();
- ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
- if (ctx) {
- key_len = tcp_fastopen_context_len(ctx) *
- TCP_FASTOPEN_KEY_LENGTH;
- memcpy(&key[0], &ctx->key[0], key_len);
- }
- rcu_read_unlock();
-
+ key_len = tcp_fastopen_get_cipher(net, icsk, key) *
+ TCP_FASTOPEN_KEY_LENGTH;
len = min_t(unsigned int, len, key_len);
if (put_user(len, optlen))
return -EFAULT;
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index c1a54f3d58f5..09b62de04eea 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -108,6 +108,29 @@ out:
return err;
}
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+ u64 *key)
+{
+ struct tcp_fastopen_context *ctx;
+ int n_keys = 0, i;
+
+ rcu_read_lock();
+ if (icsk)
+ ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
+ else
+ ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+ if (ctx) {
+ n_keys = tcp_fastopen_context_len(ctx);
+ for (i = 0; i < n_keys; i++) {
+ put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
+ put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
+ }
+ }
+ rcu_read_unlock();
+
+ return n_keys;
+}
+
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
struct sk_buff *syn,
const siphash_key_t *key,
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 96f4f2fe50ad..e8cac2655c82 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -423,12 +423,12 @@ static void mptcp_sock_destruct(struct sock *sk)
* also remove the mptcp socket, via
* sock_put(ctx->conn).
*
- * Problem is that the mptcp socket will not be in
- * SYN_RECV state and doesn't have SOCK_DEAD flag.
+ * Problem is that the mptcp socket will be in
+ * ESTABLISHED state and will not have the SOCK_DEAD flag.
* Both result in warnings from inet_sock_destruct.
*/
- if (sk->sk_state == TCP_SYN_RECV) {
+ if (sk->sk_state == TCP_ESTABLISHED) {
sk->sk_state = TCP_CLOSE;
WARN_ON_ONCE(sk->sk_socket);
sock_orphan(sk);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index b2061b6746ea..955c195ae14b 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -328,10 +328,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
return -ESOCKTNOSUPPORT;
- if (sock->type == SOCK_RAW)
+ if (sock->type == SOCK_RAW) {
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
sock->ops = &rawsock_raw_ops;
- else
+ } else {
sock->ops = &rawsock_ops;
+ }
sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
if (!sk)
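
With this check, creating a raw NFC socket becomes a privileged operation, in line with other SOCK_RAW families. A hedged userspace sketch of the new behavior:

    #include <sys/socket.h>
    #include <linux/nfc.h>

    /* without CAP_NET_RAW this now fails with errno == EPERM */
    int fd = socket(AF_NFC, SOCK_RAW, NFC_SOCKPROTO_RAW);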
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 42f8cc70bb2c..6e47ef7ef036 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1756,6 +1756,7 @@ err:
/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
+ struct flow_table *table = &dp->table;
int i;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
@@ -1774,7 +1775,14 @@ static void __dp_destroy(struct datapath *dp)
*/
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
- /* RCU destroy the flow table */
+ /* Flush the sw_flows in the tables. The RCU callback only
+ * releases resources such as the dp, ports and tables; doing
+ * the flush here avoids RCU usage warnings.
+ */
+ table_instance_flow_flush(table, ovsl_dereference(table->ti),
+ ovsl_dereference(table->ufid_ti));
+
+ /* RCU destroy the ports, meters and flow tables. */
call_rcu(&dp->rcu, destroy_dp_rcu);
}
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 8c12675cbb67..e2235849a57e 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -473,19 +473,15 @@ static void table_instance_flow_free(struct flow_table *table,
flow_mask_remove(table, flow->mask);
}
-static void table_instance_destroy(struct flow_table *table,
- struct table_instance *ti,
- struct table_instance *ufid_ti,
- bool deferred)
+/* Must be called with OVS mutex held. */
+void table_instance_flow_flush(struct flow_table *table,
+ struct table_instance *ti,
+ struct table_instance *ufid_ti)
{
int i;
- if (!ti)
- return;
-
- BUG_ON(!ufid_ti);
if (ti->keep_flows)
- goto skip_flows;
+ return;
for (i = 0; i < ti->n_buckets; i++) {
struct sw_flow *flow;
@@ -497,18 +493,16 @@ static void table_instance_destroy(struct flow_table *table,
table_instance_flow_free(table, ti, ufid_ti,
flow, false);
- ovs_flow_free(flow, deferred);
+ ovs_flow_free(flow, true);
}
}
+}
-skip_flows:
- if (deferred) {
- call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
- call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
- } else {
- __table_instance_destroy(ti);
- __table_instance_destroy(ufid_ti);
- }
+static void table_instance_destroy(struct table_instance *ti,
+ struct table_instance *ufid_ti)
+{
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}
/* No need for locking this function is called from RCU callback or
@@ -523,7 +517,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
call_rcu(&mc->rcu, mask_cache_rcu_cb);
call_rcu(&ma->rcu, mask_array_rcu_cb);
- table_instance_destroy(table, ti, ufid_ti, false);
+ table_instance_destroy(ti, ufid_ti);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -641,7 +635,8 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
flow_table->count = 0;
flow_table->ufid_count = 0;
- table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
+ table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
+ table_instance_destroy(old_ti, old_ufid_ti);
return 0;
err_free_ti:
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 74ce48fecba9..6e7d4ac59353 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -105,5 +105,8 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
bool full, const struct sw_flow_mask *mask);
void ovs_flow_masks_rebalance(struct flow_table *table);
+void table_instance_flow_flush(struct flow_table *table,
+ struct table_instance *ti,
+ struct table_instance *ufid_ti);
#endif /* flow_table.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0b8160d1a6e0..479c257ded73 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
}
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+ __releases(&pkc->blk_fill_in_prog_lock)
{
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
@@ -989,6 +990,7 @@ static void prb_fill_curr_block(char *curr,
struct tpacket_kbdq_core *pkc,
struct tpacket_block_desc *pbd,
unsigned int len)
+ __acquires(&pkc->blk_fill_in_prog_lock)
{
struct tpacket3_hdr *ppd;
@@ -2286,8 +2288,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (do_vnet &&
virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
- vio_le(), true, 0))
+ vio_le(), true, 0)) {
+ if (po->tp_version == TPACKET_V3)
+ prb_clear_blk_fill_status(&po->rx_ring);
goto drop_n_account;
+ }
if (po->tp_version <= TPACKET_V2) {
packet_increment_rx_head(po, &po->rx_ring);
@@ -2393,7 +2398,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
__clear_bit(slot_id, po->rx_ring.rx_owner_map);
spin_unlock(&sk->sk_receive_queue.lock);
sk->sk_data_ready(sk);
- } else {
+ } else if (po->tp_version == TPACKET_V3) {
prb_clear_blk_fill_status(&po->rx_ring);
}
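
The __acquires()/__releases() markers are sparse lock-context annotations, not runtime code: they tell sparse that prb_fill_curr_block() enters the blk_fill_in_prog_lock context that prb_clear_blk_fill_status() leaves, so unbalanced paths (like the TPACKET_V3 drop path fixed above) get flagged. Under __CHECKER__ they expand approximately to:

    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))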
diff --git a/net/socket.c b/net/socket.c
index aff52e81653c..dbbe8ea7d395 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -500,7 +500,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
if (f.file) {
sock = sock_from_file(f.file, err);
if (likely(sock)) {
- *fput_needed = f.flags;
+ *fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
fdput(f);
@@ -1325,7 +1325,7 @@ int sock_wake_async(struct socket_wq *wq, int how, int band)
case SOCK_WAKE_SPACE:
if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
break;
- /* fall through */
+ fallthrough;
case SOCK_WAKE_IO:
call_kill:
kill_fasync(&wq->fasync_list, SIGIO, band);
@@ -1804,8 +1804,7 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
upeer_addrlen, flags,
rlimit(RLIMIT_NOFILE));
- if (f.flags)
- fput(f.file);
+ fdput(f);
}
return ret;
@@ -1868,8 +1867,7 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
ret = move_addr_to_kernel(uservaddr, addrlen, &address);
if (!ret)
ret = __sys_connect_file(f.file, &address, addrlen, 0);
- if (f.flags)
- fput(f.file);
+ fdput(f);
}
return ret;
@@ -2097,7 +2095,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock)
int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
int optlen)
{
- sockptr_t optval;
+ sockptr_t optval = USER_SOCKPTR(user_optval);
char *kernel_optval = NULL;
int err, fput_needed;
struct socket *sock;
@@ -2105,10 +2103,6 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
if (optlen < 0)
return -EINVAL;
- err = init_user_sockptr(&optval, user_optval, optlen);
- if (err)
- return err;
-
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
return err;
@@ -3065,7 +3059,7 @@ static int __init sock_init(void)
err = register_filesystem(&sock_fs_type);
if (err)
- goto out_fs;
+ goto out;
sock_mnt = kern_mount(&sock_fs_type);
if (IS_ERR(sock_mnt)) {
err = PTR_ERR(sock_mnt);
@@ -3088,7 +3082,6 @@ out:
out_mount:
unregister_filesystem(&sock_fs_type);
-out_fs:
goto out;
}
@@ -3161,13 +3154,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
return -ENOMEM;
buf_size += rule_cnt * sizeof(u32);
- /* fall through */
+ fallthrough;
case ETHTOOL_GRXRINGS:
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_SRXCLSRLINS:
convert_out = true;
- /* fall through */
+ fallthrough;
case ETHTOOL_SRXCLSRLDEL:
buf_size += sizeof(struct ethtool_rxnfc);
convert_in = true;
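
The fdput() conversions above treat struct fd's flags word as a flags word rather than a boolean; masking with FDPUT_FPUT in sockfd_lookup_light() keeps the stored fput_needed correct even if other flag bits (e.g. FDPUT_POS_UNLOCK) are ever set. For reference, the relevant pieces of include/linux/file.h are roughly:

    struct fd {
        struct file *file;
        unsigned int flags;
    };
    #define FDPUT_FPUT       1
    #define FDPUT_POS_UNLOCK 2

    static inline void fdput(struct fd fd)
    {
        if (fd.flags & FDPUT_FPUT)
            fput(fd.file);
    }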
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index e9d0953522f0..eadc0ede928c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1510,6 +1510,6 @@ err_notifier:
void unregister_rpc_pipefs(void)
{
rpc_clients_notifier_unregister();
- kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
+ kmem_cache_destroy(rpc_inode_cachep);
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d5cc5db9dbf3..6ba9d5842629 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -607,6 +607,11 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req)
req->rq_majortimeo += xprt_calc_majortimeo(req);
}
+static void xprt_reset_minortimeo(struct rpc_rqst *req)
+{
+ req->rq_minortimeo += req->rq_timeout;
+}
+
static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
unsigned long time_init;
@@ -618,6 +623,7 @@ static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
req->rq_timeout = task->tk_client->cl_timeout->to_initval;
req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
+ req->rq_minortimeo = time_init + req->rq_timeout;
}
/**
@@ -631,6 +637,8 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
int status = 0;
+ if (time_before(jiffies, req->rq_minortimeo))
+ return status;
if (time_before(jiffies, req->rq_majortimeo)) {
if (to->to_exponential)
req->rq_timeout <<= 1;
@@ -649,6 +657,7 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
spin_unlock(&xprt->transport_lock);
status = -ETIMEDOUT;
}
+ xprt_reset_minortimeo(req);
if (req->rq_timeout == 0) {
printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 18fa6067bb7f..b74e2741f74f 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -561,7 +561,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct iov_iter msg_iter;
- char *kaddr = kmap(page);
+ char *kaddr;
struct kvec iov;
int rc;
@@ -576,6 +576,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
goto out;
}
+ kaddr = kmap(page);
iov.iov_base = kaddr + offset;
iov.iov_len = size;
iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
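
Moving the kmap() below the early-return checks closes a mapping leak: every kmap() must be paired with kunmap() on every exit path, and the old placement mapped the page before error paths that jump straight to out. Illustrative pairing (hypothetical error check):

    if (err)
        goto out;        /* nothing mapped yet, safe to bail */

    kaddr = kmap(page);  /* map only once the early exits are behind us */
    /* ... use kaddr ... */
    kunmap(page);        /* balanced on the path that mapped it */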
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 710bd44eaa49..9a3d9fedd7aa 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -935,7 +935,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int ret = 0;
int pending;
- if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+ if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_CMSG_COMPAT))
return -EOPNOTSUPP;
mutex_lock(&tls_ctx->tx_lock);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 27bbcfad9c17..9e93bc201cc0 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1032,7 +1032,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
}
/* Connected sockets that can produce data can be written. */
- if (sk->sk_state == TCP_ESTABLISHED) {
+ if (transport && sk->sk_state == TCP_ESTABLISHED) {
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
bool space_avail_now = false;
int ret = transport->notify_poll_out(