Diffstat (limited to 'include/net/sock.h')
 -rw-r--r--  include/net/sock.h | 85
 1 file changed, 51 insertions(+), 34 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index ab186b1d31ff..d882f4c8e438 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,6 @@
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
-#include <linux/aio.h>
#include <linux/sched.h>
#include <linux/filter.h>
@@ -67,6 +66,7 @@
#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
+#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
struct cgroup;
@@ -184,21 +184,22 @@ struct sock_common {
unsigned char skc_reuse:4;
unsigned char skc_reuseport:1;
unsigned char skc_ipv6only:1;
+ unsigned char skc_net_refcnt:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
struct hlist_nulls_node skc_portaddr_node;
};
struct proto *skc_prot;
-#ifdef CONFIG_NET_NS
- struct net *skc_net;
-#endif
+ possible_net_t skc_net;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr skc_v6_daddr;
struct in6_addr skc_v6_rcv_saddr;
#endif
+ atomic64_t skc_cookie;
+
/*
* fields between dontcopy_begin/dontcopy_end
* are not copied in sock_copy()
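The skc_net member moves from a CONFIG_NET_NS-guarded raw pointer to the possible_net_t wrapper, so the field always exists syntactically and callers go through read_pnet()/write_pnet() (as sock_net()/sock_net_set() do further down) instead of open-coding the #ifdef. For reference, the wrapper defined in include/net/net_namespace.h looks roughly like the sketch below; this is paraphrased context, not part of this diff:

	/* Paraphrased sketch of the helper type, for context only */
	typedef struct {
	#ifdef CONFIG_NET_NS
		struct net *net;	/* pointer only stored when namespaces are built in */
	#endif
	} possible_net_t;

	static inline void write_pnet(possible_net_t *pnet, struct net *net)
	{
	#ifdef CONFIG_NET_NS
		pnet->net = net;
	#endif
	}

	static inline struct net *read_pnet(const possible_net_t *pnet)
	{
	#ifdef CONFIG_NET_NS
		return pnet->net;
	#else
		return &init_net;	/* without CONFIG_NET_NS there is only init_net */
	#endif
	}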
@@ -323,12 +324,14 @@ struct sock {
#define sk_reuse __sk_common.skc_reuse
#define sk_reuseport __sk_common.skc_reuseport
#define sk_ipv6only __sk_common.skc_ipv6only
+#define sk_net_refcnt __sk_common.skc_net_refcnt
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_cookie __sk_common.skc_cookie
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
@@ -403,8 +406,8 @@ struct sock {
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
- unsigned short sk_ack_backlog;
- unsigned short sk_max_ack_backlog;
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
__u32 sk_priority;
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
__u32 sk_cgrp_prioidx;
@@ -958,10 +961,9 @@ struct proto {
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
- int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
- int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg,
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg,
+ size_t len);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags,
int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
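With the kiocb argument dropped from struct proto, a protocol's handlers start at the socket and message; any AIO context stays in the socket layer above. A minimal sketch of a handler matching the new recvmsg prototype, using a hypothetical "foo" protocol that is not part of this diff:

	/* Hedged sketch: "foo" is a made-up protocol used only to show the new
	 * prototype.  Before this change the handler also took a leading
	 * struct kiocb *iocb argument.
	 */
	static int foo_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			       int noblock, int flags, int *addr_len)
	{
		struct sk_buff *skb;
		int copied, err = 0;

		skb = skb_recv_datagram(sk, flags, noblock, &err);
		if (!skb)
			return err;

		copied = min_t(size_t, len, skb->len);
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
		skb_free_datagram(sk, skb);
		return err ? err : copied;
	}

	static struct proto foo_proto = {
		.name	  = "FOO",
		.owner	  = THIS_MODULE,
		.obj_size = sizeof(struct sock),
		.recvmsg  = foo_recvmsg,
	};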
@@ -1514,9 +1516,8 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void sk_free(struct sock *sk);
-void sk_release_kernel(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
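sk_alloc() now takes a kern flag, so the caller states at allocation time whether this is an in-kernel socket; together with the new skc_net_refcnt bit above, that decides whether the socket pins its network namespace, which is what makes the sk_release_kernel()/sk_change_net() pair removed below unnecessary. A hedged sketch of an in-kernel caller (the protocol and family choices are purely illustrative):

	/* Illustrative sketch, not taken from this diff: kern = 1 marks an
	 * in-kernel socket that must not hold a reference on its netns, so
	 * the namespace can still be torn down; sockets created on behalf
	 * of userspace pass kern = 0.
	 */
	static struct sock *foo_alloc_kernel_sk(struct net *net)
	{
		struct sock *sk;

		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &udp_prot, 1 /* kern */);
		if (!sk)
			return NULL;
		sock_init_data(NULL, sk);
		return sk;
	}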
@@ -1562,9 +1563,8 @@ int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
-int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
-int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
- int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
@@ -1576,8 +1576,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
*/
int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
@@ -1626,7 +1626,7 @@ static inline void sock_put(struct sock *sk)
sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
- * (TCP_TIMEWAIT, ESTABLISHED...)
+ * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
*/
void sock_gen_put(struct sock *sk);
@@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+bool sk_mc_loop(struct sock *sk);
+
static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
@@ -2078,6 +2080,29 @@ static inline int sock_intr_errno(long timeo)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
+struct sock_skb_cb {
+ u32 dropcount;
+};
+
+/* Store sock_skb_cb at the end of skb->cb[] so protocol families
+ * using skb->cb[] would keep using it directly and utilize its
+ * alignment guarantee.
+ */
+#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb)))
+
+#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+ SOCK_SKB_CB_OFFSET))
+
+#define sock_skb_cb_check_size(size) \
+ BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+
+static inline void
+sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+{
+ SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
+}
+
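Because sock_skb_cb sits in the last bytes of skb->cb[], a protocol's own control-block layout keeps the front of the buffer unchanged, and sock_skb_cb_check_size() turns any overlap into a build error. A hedged sketch of how receive code might consume the stored drop counter (the foo_* names are invented for illustration):

	/* Illustrative only: struct foo_skb_cb stands in for a protocol's
	 * private per-skb state kept at the start of skb->cb[].
	 */
	struct foo_skb_cb {
		u32 seq;
		u32 flags;
	};
	#define FOO_SKB_CB(skb) ((struct foo_skb_cb *)&((skb)->cb[0]))

	static void foo_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
	{
		/* fails the build if foo_skb_cb would overlap the tail sock_skb_cb */
		sock_skb_cb_check_size(sizeof(struct foo_skb_cb));

		/* dropcount was stamped by sock_skb_set_dropcount() at queue time */
		if (sock_flag(sk, SOCK_RXQ_OVFL))
			put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, sizeof(u32),
				 &SOCK_SKB_CB(skb)->dropcount);
	}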
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2168,22 +2193,6 @@ void sock_net_set(struct sock *sk, struct net *net)
write_pnet(&sk->sk_net, net);
}
-/*
- * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a reference to a namespace in order to allow
- * to stop it.
- * Sockets after sk_change_net should be released using sk_release_kernel
- */
-static inline void sk_change_net(struct sock *sk, struct net *net)
-{
- struct net *current_net = sock_net(sk);
-
- if (!net_eq(current_net, net)) {
- put_net(current_net);
- sock_net_set(sk, hold_net(net));
- }
-}
-
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
if (skb->sk) {
@@ -2196,6 +2205,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
+/* This helper checks if a socket is a full socket,
+ * ie _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
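TCP_TIME_WAIT and the new TCP_NEW_SYN_RECV minisockets share only the sock_common prefix with struct sock, so code that can see either kind must check before touching full-socket members. A small hedged usage sketch (foo_* is hypothetical):

	/* Illustrative sketch: fields beyond sock_common (sk_rcvbuf, sk_socket,
	 * sk_priority, ...) exist only on full sockets.
	 */
	static int foo_sk_rcvbuf(const struct sock *sk)
	{
		if (!sk_fullsock(sk))
			return 0;	/* request (TCP_NEW_SYN_RECV) or timewait sock */
		return sk->sk_rcvbuf;
	}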
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);