From 72a3effaf633bcae9034b7e176bdbd78d64a71db Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 16 Nov 2006 02:30:37 -0800 Subject: [NET]: Size listen hash tables using backlog hint We currently allocate a fixed size (TCP_SYNQ_HSIZE=512) slots hash table for each LISTEN socket, regardless of various parameters (listen backlog for example) On x86_64, this means order-1 allocations (might fail), even for 'small' sockets, expecting few connections. On the contrary, a huge server wanting a backlog of 50000 is slowed down a bit because of this fixed limit. This patch makes the sizing of listen hash table a dynamic parameter, depending of : - net.core.somaxconn tunable (default is 128) - net.ipv4.tcp_max_syn_backlog tunable (default : 256, 1024 or 128) - backlog value given by user application (2nd parameter of listen()) For large allocations (bigger than PAGE_SIZE), we use vmalloc() instead of kmalloc(). We still limit memory allocation with the two existing tunables (somaxconn & tcp_max_syn_backlog). So for standard setups, this patch actually reduce RAM usage. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 22ef8bd26620..5fbf96552cac 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -715,7 +715,7 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk, return dopt; } -struct request_sock_ops tcp_request_sock_ops = { +struct request_sock_ops tcp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct tcp_request_sock), .rtx_syn_ack = tcp_v4_send_synack, @@ -1385,7 +1385,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) if (st->state == TCP_SEQ_STATE_OPENREQ) { struct request_sock *req = cur; - icsk = inet_csk(st->syn_wait_sk); + icsk = inet_csk(st->syn_wait_sk); req = req->dl_next; while (1) { while (req) { @@ -1395,7 +1395,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) } req = req->dl_next; } - if (++st->sbucket >= TCP_SYNQ_HSIZE) + if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) break; get_req: req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; -- cgit v1.2.3 From a94f723d595ee085f81b1788d18e031af7eeba91 Mon Sep 17 00:00:00 2001 From: Gerrit Renker Date: Fri, 10 Nov 2006 14:06:49 -0800 Subject: [TCP]: Remove dead code in init_sequence This removes two redundancies: 1) The test (skb->protocol == htons(ETH_P_IPV6) in tcp_v6_init_sequence() is always true, due to * tcp_v6_conn_request() is the only function calling this one * tcp_v6_conn_request() redirects all skb's with ETH_P_IP protocol to tcp_v4_conn_request() [ cf. top of tcp_v6_conn_request()] 2) The first argument, `struct sock *sk' of tcp_v{4,6}_init_sequence() is never used. Signed-off-by: Gerrit Renker Signed-off-by: David S. 
Miller --- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv6/tcp_ipv6.c | 19 ++++++------------- 2 files changed, 8 insertions(+), 15 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5fbf96552cac..2eb58844403c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -111,7 +111,7 @@ void tcp_unhash(struct sock *sk) inet_unhash(&tcp_hashinfo, sk); } -static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) +static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) { return secure_tcp_sequence_number(skb->nh.iph->daddr, skb->nh.iph->saddr, @@ -859,7 +859,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; } - isn = tcp_v4_init_sequence(sk, skb); + isn = tcp_v4_init_sequence(skb); } tcp_rsk(req)->snt_isn = isn; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 06b536b47f97..9a8e690fdf7c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -106,19 +106,12 @@ static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len, return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); } -static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb) +static __u32 tcp_v6_init_sequence(struct sk_buff *skb) { - if (skb->protocol == htons(ETH_P_IPV6)) { - return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, - skb->nh.ipv6h->saddr.s6_addr32, - skb->h.th->dest, - skb->h.th->source); - } else { - return secure_tcp_sequence_number(skb->nh.iph->daddr, - skb->nh.iph->saddr, - skb->h.th->dest, - skb->h.th->source); - } + return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, + skb->nh.ipv6h->saddr.s6_addr32, + skb->h.th->dest, + skb->h.th->source); } static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, @@ -822,7 +815,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) treq->iif = inet6_iif(skb); if (isn == 0) - isn = tcp_v6_init_sequence(sk,skb); + isn = tcp_v6_init_sequence(skb); tcp_rsk(req)->snt_isn = isn; -- cgit v1.2.3 From b9df3cb8cf9a96e63dfdcd3056a9cbc71f2459e7 Mon Sep 17 00:00:00 2001 From: Gerrit Renker Date: Tue, 14 Nov 2006 11:21:36 -0200 Subject: [TCP/DCCP]: Introduce net_xmit_eval Throughout the TCP/DCCP (and tunnelling) code, it often happens that the return code of a transmit function needs to be tested against NET_XMIT_CN which is a value that does not indicate a strict error condition. This patch uses a macro for these recurring situations which is consistent with the already existing macro net_xmit_errno, saving on duplicated code. Signed-off-by: Gerrit Renker Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/netdevice.h | 4 ++++ include/net/ipip.h | 2 +- net/dccp/ipv4.c | 5 ++--- net/dccp/ipv6.c | 3 +-- net/dccp/output.c | 14 ++------------ net/ipv4/tcp_ipv4.c | 3 +-- net/ipv4/tcp_output.c | 8 +------- net/ipv6/ip6_tunnel.c | 2 +- net/ipv6/tcp_ipv6.c | 3 +-- 9 files changed, 14 insertions(+), 30 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 4e967b2e22cc..caa3c2593719 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -66,6 +66,10 @@ struct netpoll_info; #define NET_RX_CN_HIGH 4 /* The storm is here */ #define NET_RX_BAD 5 /* packet dropped due to kernel error */ +/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. 
It + * indicates that the device will soon be dropping packets, or already drops + * some packets of the same priority; prompting us to send less aggressively. */ +#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e)) #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) #endif diff --git a/include/net/ipip.h b/include/net/ipip.h index f490c3cbe377..84058858eea7 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -35,7 +35,7 @@ struct ip_tunnel ip_send_check(iph); \ \ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output);\ - if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { \ + if (net_xmit_eval(err) == 0) { \ stats->tx_bytes += pkt_len; \ stats->tx_packets++; \ } else { \ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index bc400b2ba25e..61c09014dade 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -501,8 +501,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); - if (err == NET_XMIT_CN) - err = 0; + err = net_xmit_eval(err); } out: @@ -571,7 +570,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) rxskb->nh.iph->saddr, NULL); bh_unlock_sock(dccp_v4_ctl_socket->sk); - if (err == NET_XMIT_CN || err == 0) { + if (net_xmit_eval(err) == 0) { DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); } diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 8d6ddb6389a7..2165b1740c7c 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -294,8 +294,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, &ireq6->rmt_addr); ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); err = ip6_xmit(sk, skb, &fl, opt, 0); - if (err == NET_XMIT_CN) - err = 0; + err = net_xmit_eval(err); } done: diff --git a/net/dccp/output.c b/net/dccp/output.c index 0994b13f0f15..ef22f3cc791a 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -125,16 +125,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); - if (err <= 0) - return err; - - /* NET_XMIT_CN is special. It does not guarantee, - * that this packet is lost. It tells that device - * is about to start to drop packets or already - * drops some packets of the same priority and - * invokes us to send less aggressively. - */ - return err == NET_XMIT_CN ? 0 : err; + return net_xmit_eval(err); } return -ENOBUFS; } @@ -426,8 +417,7 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) if (skb != NULL) { memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0); - if (err == NET_XMIT_CN) - err = 0; + return net_xmit_eval(err); } } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2eb58844403c..0ad0904bf56c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -662,8 +662,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); - if (err == NET_XMIT_CN) - err = 0; + err = net_xmit_eval(err); } out: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f63e99aac2d5..6a8581ab9a23 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -484,13 +484,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, tcp_enter_cwr(sk); - /* NET_XMIT_CN is special. It does not guarantee, - * that this packet is lost. 
It tells that device - * is about to start to drop packets or already - * drops some packets of the same priority and - * invokes us to send less aggressively. - */ - return err == NET_XMIT_CN ? 0 : err; + return net_xmit_eval(err); #undef SYSCTL_FLAG_TSTAMPS #undef SYSCTL_FLAG_WSCALE diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4919f9294e2a..80a11909159d 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -748,7 +748,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); - if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { + if (net_xmit_eval(err) == 0) { stats->tx_bytes += pkt_len; stats->tx_packets++; } else { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9a8e690fdf7c..9a88395a7629 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -502,8 +502,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); err = ip6_xmit(sk, skb, &fl, opt, 0); - if (err == NET_XMIT_CN) - err = 0; + err = net_xmit_eval(err); } done: -- cgit v1.2.3 From cfb6eeb4c860592edd123fdea908d23c6ad1c7dc Mon Sep 17 00:00:00 2001 From: YOSHIFUJI Hideaki Date: Tue, 14 Nov 2006 19:07:45 -0800 Subject: [TCP]: MD5 Signature Option (RFC2385) support. Based on implementation by Rick Payne. Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- CREDITS | 3 + include/linux/tcp.h | 35 ++- include/net/request_sock.h | 3 +- include/net/tcp.h | 143 ++++++++++ include/net/timewait_sock.h | 3 + net/dccp/ipv4.c | 6 +- net/dccp/ipv6.c | 6 +- net/dccp/minisocks.c | 2 +- net/ipv4/Kconfig | 16 ++ net/ipv4/tcp.c | 137 +++++++++ net/ipv4/tcp_input.c | 8 + net/ipv4/tcp_ipv4.c | 673 ++++++++++++++++++++++++++++++++++++++++++-- net/ipv4/tcp_minisocks.c | 64 ++++- net/ipv4/tcp_output.c | 111 +++++++- net/ipv6/tcp_ipv6.c | 568 +++++++++++++++++++++++++++++++++++-- 15 files changed, 1714 insertions(+), 64 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/CREDITS b/CREDITS index ccd4f9f4dd71..d0880082c19b 100644 --- a/CREDITS +++ b/CREDITS @@ -2598,6 +2598,9 @@ S: Ucitelska 1576 S: Prague 8 S: 182 00 Czech Republic +N: Rick Payne +D: RFC2385 Support for TCP + N: Barak A. Pearlmutter E: bap@cs.unm.edu W: http://www.cs.unm.edu/~bap/ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 2d36f6db3706..0aecfc955591 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -19,6 +19,7 @@ #include #include +#include struct tcphdr { __be16 source; @@ -94,6 +95,7 @@ enum { #define TCP_INFO 11 /* Information about this connection. 
*/ #define TCP_QUICKACK 12 /* Block/reenable quick acks */ #define TCP_CONGESTION 13 /* Congestion control algorithm */ +#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */ #define TCPI_OPT_TIMESTAMPS 1 #define TCPI_OPT_SACK 2 @@ -157,6 +159,17 @@ struct tcp_info __u32 tcpi_total_retrans; }; +/* for TCP_MD5SIG socket option */ +#define TCP_MD5SIG_MAXKEYLEN 80 + +struct tcp_md5sig { + struct __kernel_sockaddr_storage tcpm_addr; /* address associated */ + __u16 __tcpm_pad1; /* zero */ + __u16 tcpm_keylen; /* key length */ + __u32 __tcpm_pad2; /* zero */ + __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */ +}; + #ifdef __KERNEL__ #include @@ -197,9 +210,13 @@ struct tcp_options_received { }; struct tcp_request_sock { - struct inet_request_sock req; - __u32 rcv_isn; - __u32 snt_isn; + struct inet_request_sock req; +#ifdef CONFIG_TCP_MD5SIG + /* Only used by TCP MD5 Signature so far. */ + struct tcp_request_sock_ops *af_specific; +#endif + __u32 rcv_isn; + __u32 snt_isn; }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) @@ -363,6 +380,14 @@ struct tcp_sock { __u32 probe_seq_start; __u32 probe_seq_end; } mtu_probe; + +#ifdef CONFIG_TCP_MD5SIG +/* TCP AF-Specific parts; only used by MD5 Signature support so far */ + struct tcp_sock_af_ops *af_specific; + +/* TCP MD5 Signagure Option information */ + struct tcp_md5sig_info *md5sig_info; +#endif }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) @@ -377,6 +402,10 @@ struct tcp_timewait_sock { __u32 tw_rcv_wnd; __u32 tw_ts_recent; long tw_ts_recent_stamp; +#ifdef CONFIG_TCP_MD5SIG + __u16 tw_md5_keylen; + __u8 tw_md5_key[TCP_MD5SIG_MAXKEYLEN]; +#endif }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff --git a/include/net/request_sock.h b/include/net/request_sock.h index b5b023e79e5f..e37baaf2080b 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -35,7 +35,8 @@ struct request_sock_ops { struct dst_entry *dst); void (*send_ack)(struct sk_buff *skb, struct request_sock *req); - void (*send_reset)(struct sk_buff *skb); + void (*send_reset)(struct sock *sk, + struct sk_buff *skb); void (*destructor)(struct request_sock *req); }; diff --git a/include/net/tcp.h b/include/net/tcp.h index e1a5d29d0a1f..363960872de0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -161,6 +162,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOPT_SACK_PERM 4 /* SACK Permitted */ #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ +#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ /* * TCP option lengths @@ -170,6 +172,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOLEN_WINDOW 3 #define TCPOLEN_SACK_PERM 2 #define TCPOLEN_TIMESTAMP 10 +#define TCPOLEN_MD5SIG 18 /* But this is what stacks really send out. 
*/ #define TCPOLEN_TSTAMP_ALIGNED 12 @@ -178,6 +181,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOLEN_SACK_BASE 2 #define TCPOLEN_SACK_BASE_ALIGNED 4 #define TCPOLEN_SACK_PERBLOCK 8 +#define TCPOLEN_MD5SIG_ALIGNED 20 /* Flags in tp->nonagle */ #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ @@ -299,6 +303,8 @@ extern void tcp_cleanup_rbuf(struct sock *sk, int copied); extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); +extern void tcp_twsk_destructor(struct sock *sk); + static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) { @@ -1064,6 +1070,114 @@ static inline void clear_all_retrans_hints(struct tcp_sock *tp){ tp->fastpath_skb_hint = NULL; } +/* MD5 Signature */ +struct crypto_hash; + +/* - key database */ +struct tcp_md5sig_key { + u8 *key; + u8 keylen; +}; + +struct tcp4_md5sig_key { + u8 *key; + u16 keylen; + __be32 addr; +}; + +struct tcp6_md5sig_key { + u8 *key; + u16 keylen; +#if 0 + u32 scope_id; /* XXX */ +#endif + struct in6_addr addr; +}; + +/* - sock block */ +struct tcp_md5sig_info { + struct tcp4_md5sig_key *keys4; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct tcp6_md5sig_key *keys6; + u32 entries6; + u32 alloced6; +#endif + u32 entries4; + u32 alloced4; +}; + +/* - pseudo header */ +struct tcp4_pseudohdr { + __be32 saddr; + __be32 daddr; + __u8 pad; + __u8 protocol; + __be16 len; +}; + +struct tcp6_pseudohdr { + struct in6_addr saddr; + struct in6_addr daddr; + __be32 len; + __be32 protocol; /* including padding */ +}; + +union tcp_md5sum_block { + struct tcp4_pseudohdr ip4; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct tcp6_pseudohdr ip6; +#endif +}; + +/* - pool: digest algorithm, hash description and scratch buffer */ +struct tcp_md5sig_pool { + struct hash_desc md5_desc; + union tcp_md5sum_block md5_blk; +}; + +#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! 
*/ + +/* - functions */ +extern int tcp_v4_calc_md5_hash(char *md5_hash, + struct tcp_md5sig_key *key, + struct sock *sk, + struct dst_entry *dst, + struct request_sock *req, + struct tcphdr *th, + int protocol, int tcplen); +extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, + struct sock *addr_sk); + +extern int tcp_v4_md5_do_add(struct sock *sk, + __be32 addr, + u8 *newkey, + u8 newkeylen); + +extern int tcp_v4_md5_do_del(struct sock *sk, + u32 addr); + +extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void); +extern void tcp_free_md5sig_pool(void); + +extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu); +extern void __tcp_put_md5sig_pool(void); + +static inline +struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) +{ + int cpu = get_cpu(); + struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu); + if (!ret) + put_cpu(); + return ret; +} + +static inline void tcp_put_md5sig_pool(void) +{ + __tcp_put_md5sig_pool(); + put_cpu(); +} + /* /proc */ enum tcp_seq_states { TCP_SEQ_STATE_LISTENING, @@ -1103,6 +1217,35 @@ extern int tcp4_proc_init(void); extern void tcp4_proc_exit(void); #endif +/* TCP af-specific functions */ +struct tcp_sock_af_ops { +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, + struct sock *addr_sk); + int (*calc_md5_hash) (char *location, + struct tcp_md5sig_key *md5, + struct sock *sk, + struct dst_entry *dst, + struct request_sock *req, + struct tcphdr *th, + int protocol, int len); + int (*md5_add) (struct sock *sk, + struct sock *addr_sk, + u8 *newkey, + u8 len); + int (*md5_parse) (struct sock *sk, + char __user *optval, + int optlen); +#endif +}; + +struct tcp_request_sock_ops { +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, + struct request_sock *req); +#endif +}; + extern void tcp_v4_init(struct net_proto_family *ops); extern void tcp_init(void); diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h index be293d795e38..d7a306ea560d 100644 --- a/include/net/timewait_sock.h +++ b/include/net/timewait_sock.h @@ -31,6 +31,9 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) static inline void twsk_destructor(struct sock *sk) { + BUG_ON(sk == NULL); + BUG_ON(sk->sk_prot == NULL); + BUG_ON(sk->sk_prot->twsk_prot == NULL); if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) sk->sk_prot->twsk_prot->twsk_destructor(sk); } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 34d6d197c3b2..35985334daee 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -509,7 +509,7 @@ out: return err; } -static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) +static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { int err; struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; @@ -724,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; reset: - dccp_v4_ctl_send_reset(skb); + dccp_v4_ctl_send_reset(sk, skb); discard: kfree_skb(skb); return 0; @@ -913,7 +913,7 @@ no_dccp_socket: if (dh->dccph_type != DCCP_PKT_RESET) { DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; - dccp_v4_ctl_send_reset(skb); + dccp_v4_ctl_send_reset(sk, skb); } discard_it: diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index fc326173c215..e0a0607862ef 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -310,7 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req) kfree_skb(inet6_rsk(req)->pktopts); } -static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) +static void 
dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + @@ -805,7 +805,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; reset: - dccp_v6_ctl_send_reset(skb); + dccp_v6_ctl_send_reset(sk, skb); discard: if (opt_skb != NULL) __kfree_skb(opt_skb); @@ -902,7 +902,7 @@ no_dccp_socket: if (dh->dccph_type != DCCP_PKT_RESET) { DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; - dccp_v6_ctl_send_reset(skb); + dccp_v6_ctl_send_reset(sk, skb); } discard_it: diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 0c49733f5be1..3975048d8094 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -246,7 +246,7 @@ listen_overflow: DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; drop: if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) - req->rsk_ops->send_reset(skb); + req->rsk_ops->send_reset(sk, skb); inet_csk_reqsk_queue_drop(sk, req, prev); goto out; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index bc298bcc344e..39e0cb763588 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG default "reno" if DEFAULT_RENO default "cubic" +config TCP_MD5SIG + bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)" + depends on EXPERIMENTAL + select CRYPTO + select CRYPTO_MD5 + ---help--- + RFC2385 specifices a method of giving MD5 protection to TCP sessions. + Its main (only?) use is to protect BGP sessions between core routers + on the Internet. + + If unsure, say N. + +config TCP_MD5SIG_DEBUG + bool "TCP: MD5 Signature Option debugging" + depends on TCP_MD5SIG + source "net/ipv4/ipvs/Kconfig" diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c05e8edaf544..dadef867a3bb 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -258,6 +258,7 @@ #include #include #include +#include #include #include @@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level, } break; +#ifdef CONFIG_TCP_MD5SIG + case TCP_MD5SIG: + /* Read the IP->Key mappings from userspace */ + err = tp->af_specific->md5_parse(sk, optval, optlen); + break; +#endif + default: err = -ENOPROTOOPT; break; @@ -2231,6 +2239,135 @@ out: } EXPORT_SYMBOL(tcp_tso_segment); +#ifdef CONFIG_TCP_MD5SIG +static unsigned long tcp_md5sig_users; +static struct tcp_md5sig_pool **tcp_md5sig_pool; +static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); + +static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) +{ + int cpu; + for_each_possible_cpu(cpu) { + struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); + if (p) { + if (p->md5_desc.tfm) + crypto_free_hash(p->md5_desc.tfm); + kfree(p); + p = NULL; + } + } + free_percpu(pool); +} + +void tcp_free_md5sig_pool(void) +{ + struct tcp_md5sig_pool **pool = NULL; + + spin_lock(&tcp_md5sig_pool_lock); + if (--tcp_md5sig_users == 0) { + pool = tcp_md5sig_pool; + tcp_md5sig_pool = NULL; + } + spin_unlock(&tcp_md5sig_pool_lock); + if (pool) + __tcp_free_md5sig_pool(pool); +} + +EXPORT_SYMBOL(tcp_free_md5sig_pool); + +struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) +{ + int cpu; + struct tcp_md5sig_pool **pool; + + pool = alloc_percpu(struct tcp_md5sig_pool *); + if (!pool) + return NULL; + + for_each_possible_cpu(cpu) { + struct tcp_md5sig_pool *p; + struct crypto_hash *hash; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + goto out_free; + *per_cpu_ptr(pool, cpu) = p; + + hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (!hash || IS_ERR(hash)) + 
goto out_free; + + p->md5_desc.tfm = hash; + } + return pool; +out_free: + __tcp_free_md5sig_pool(pool); + return NULL; +} + +struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) +{ + struct tcp_md5sig_pool **pool; + int alloc = 0; + +retry: + spin_lock(&tcp_md5sig_pool_lock); + pool = tcp_md5sig_pool; + if (tcp_md5sig_users++ == 0) { + alloc = 1; + spin_unlock(&tcp_md5sig_pool_lock); + } else if (!pool) { + tcp_md5sig_users--; + spin_unlock(&tcp_md5sig_pool_lock); + cpu_relax(); + goto retry; + } else + spin_unlock(&tcp_md5sig_pool_lock); + + if (alloc) { + /* we cannot hold spinlock here because this may sleep. */ + struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); + spin_lock(&tcp_md5sig_pool_lock); + if (!p) { + tcp_md5sig_users--; + spin_unlock(&tcp_md5sig_pool_lock); + return NULL; + } + pool = tcp_md5sig_pool; + if (pool) { + /* oops, it has already been assigned. */ + spin_unlock(&tcp_md5sig_pool_lock); + __tcp_free_md5sig_pool(p); + } else { + tcp_md5sig_pool = pool = p; + spin_unlock(&tcp_md5sig_pool_lock); + } + } + return pool; +} + +EXPORT_SYMBOL(tcp_alloc_md5sig_pool); + +struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) +{ + struct tcp_md5sig_pool **p; + spin_lock(&tcp_md5sig_pool_lock); + p = tcp_md5sig_pool; + if (p) + tcp_md5sig_users++; + spin_unlock(&tcp_md5sig_pool_lock); + return (p ? *per_cpu_ptr(p, cpu) : NULL); +} + +EXPORT_SYMBOL(__tcp_get_md5sig_pool); + +void __tcp_put_md5sig_pool(void) { + __tcp_free_md5sig_pool(tcp_md5sig_pool); +} + +EXPORT_SYMBOL(__tcp_put_md5sig_pool); +#endif + extern void __skb_cb_too_small_for_tcp(int, int); extern struct tcp_congestion_ops tcp_reno; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4a8c96cdec7d..6ab3423674bb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, opt_rx->sack_ok) { TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; } +#ifdef CONFIG_TCP_MD5SIG + case TCPOPT_MD5SIG: + /* + * The MD5 Hash has already been + * checked (see tcp_v{4,6}_do_rcv()). + */ + break; +#endif }; ptr+=opsize-2; length-=opsize; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0ad0904bf56c..8c8e8112f98d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -78,6 +78,9 @@ #include #include +#include +#include + int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; @@ -89,6 +92,13 @@ static struct socket *tcp_socket; void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); +#ifdef CONFIG_TCP_MD5SIG +static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr); +static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, + __be32 saddr, __be32 daddr, struct tcphdr *th, + int protocol, int tcplen); +#endif + struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), .lhash_users = ATOMIC_INIT(0), @@ -526,11 +536,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) * Exception: precedence violation. We do not implement it in any case. 
*/ -static void tcp_v4_send_reset(struct sk_buff *skb) +static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = skb->h.th; - struct tcphdr rth; + struct { + struct tcphdr th; +#ifdef CONFIG_TCP_MD5SIG + u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; +#endif + } rep; struct ip_reply_arg arg; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *key; +#endif /* Never send a reset in response to a reset. */ if (th->rst) @@ -540,29 +558,50 @@ static void tcp_v4_send_reset(struct sk_buff *skb) return; /* Swap the send and the receive. */ - memset(&rth, 0, sizeof(struct tcphdr)); - rth.dest = th->source; - rth.source = th->dest; - rth.doff = sizeof(struct tcphdr) / 4; - rth.rst = 1; + memset(&rep, 0, sizeof(rep)); + rep.th.dest = th->source; + rep.th.source = th->dest; + rep.th.doff = sizeof(struct tcphdr) / 4; + rep.th.rst = 1; if (th->ack) { - rth.seq = th->ack_seq; + rep.th.seq = th->ack_seq; } else { - rth.ack = 1; - rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + - skb->len - (th->doff << 2)); + rep.th.ack = 1; + rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + + skb->len - (th->doff << 2)); } memset(&arg, 0, sizeof arg); - arg.iov[0].iov_base = (unsigned char *)&rth; - arg.iov[0].iov_len = sizeof rth; + arg.iov[0].iov_base = (unsigned char *)&rep; + arg.iov[0].iov_len = sizeof(rep.th); + +#ifdef CONFIG_TCP_MD5SIG + key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL; + if (key) { + rep.opt[0] = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | + TCPOLEN_MD5SIG); + /* Update length and the length the header thinks exists */ + arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; + rep.th.doff = arg.iov[0].iov_len / 4; + + tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1], + key, + skb->nh.iph->daddr, + skb->nh.iph->saddr, + &rep.th, IPPROTO_TCP, + arg.iov[0].iov_len); + } +#endif + arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, skb->nh.iph->saddr, /*XXX*/ sizeof(struct tcphdr), IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; - ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); + ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); @@ -572,15 +611,24 @@ static void tcp_v4_send_reset(struct sk_buff *skb) outside socket context is ugly, certainly. What can I do? 
*/ -static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, +static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, + struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) { struct tcphdr *th = skb->h.th; struct { struct tcphdr th; - u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2]; + u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) +#ifdef CONFIG_TCP_MD5SIG + + (TCPOLEN_MD5SIG_ALIGNED >> 2) +#endif + ]; } rep; struct ip_reply_arg arg; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *key; + struct tcp_md5sig_key tw_key; +#endif memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof arg); @@ -588,12 +636,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); if (ts) { - rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | - (TCPOPT_TIMESTAMP << 8) | - TCPOLEN_TIMESTAMP); - rep.tsopt[1] = htonl(tcp_time_stamp); - rep.tsopt[2] = htonl(ts); - arg.iov[0].iov_len = sizeof(rep); + rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | + TCPOLEN_TIMESTAMP); + rep.opt[1] = htonl(tcp_time_stamp); + rep.opt[2] = htonl(ts); + arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED; } /* Swap the send and the receive. */ @@ -605,6 +653,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, rep.th.ack = 1; rep.th.window = htons(win); +#ifdef CONFIG_TCP_MD5SIG + /* + * The SKB holds an imcoming packet, but may not have a valid ->sk + * pointer. This is especially the case when we're dealing with a + * TIME_WAIT ack, because the sk structure is long gone, and only + * the tcp_timewait_sock remains. So the md5 key is stashed in that + * structure, and we use it in preference. I believe that (twsk || + * skb->sk) holds true, but we program defensively. + */ + if (!twsk && skb->sk) { + key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr); + } else if (twsk && twsk->tw_md5_keylen) { + tw_key.key = twsk->tw_md5_key; + tw_key.keylen = twsk->tw_md5_keylen; + key = &tw_key; + } else { + key = NULL; + } + + if (key) { + int offset = (ts) ? 
3 : 0; + + rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | + TCPOLEN_MD5SIG); + arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; + rep.th.doff = arg.iov[0].iov_len/4; + + tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset], + key, + skb->nh.iph->daddr, + skb->nh.iph->saddr, + &rep.th, IPPROTO_TCP, + arg.iov[0].iov_len); + } +#endif + arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, skb->nh.iph->saddr, /*XXX*/ arg.iov[0].iov_len, IPPROTO_TCP, 0); @@ -618,9 +704,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); - const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); + struct tcp_timewait_sock *tcptw = tcp_twsk(sk); - tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, + tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); inet_twsk_put(tw); @@ -628,7 +714,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) { - tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, + tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, + tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); } @@ -714,6 +801,461 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk, return dopt; } +#ifdef CONFIG_TCP_MD5SIG +/* + * RFC2385 MD5 checksumming requires a mapping of + * IP address->MD5 Key. + * We need to maintain these in the sk structure. + */ + +/* Find the Key structure for an address. */ +static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) +{ + struct tcp_sock *tp = tcp_sk(sk); + int i; + + if (!tp->md5sig_info || !tp->md5sig_info->entries4) + return NULL; + for (i = 0; i < tp->md5sig_info->entries4; i++) { + if (tp->md5sig_info->keys4[i].addr == addr) + return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i]; + } + return NULL; +} + +struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, + struct sock *addr_sk) +{ + return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr); +} + +EXPORT_SYMBOL(tcp_v4_md5_lookup); + +struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, + struct request_sock *req) +{ + return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); +} + +/* This can be called on a newly created socket, from other files */ +int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, + u8 *newkey, u8 newkeylen) +{ + /* Add Key to the list */ + struct tcp4_md5sig_key *key; + struct tcp_sock *tp = tcp_sk(sk); + struct tcp4_md5sig_key *keys; + + key = (struct tcp4_md5sig_key *) tcp_v4_md5_do_lookup(sk, addr); + if (key) { + /* Pre-existing entry - just update that one. 
*/ + kfree (key->key); + key->key = newkey; + key->keylen = newkeylen; + } else { + if (!tp->md5sig_info) { + tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); + if (!tp->md5sig_info) { + kfree(newkey); + return -ENOMEM; + } + } + if (tcp_alloc_md5sig_pool() == NULL) { + kfree(newkey); + return -ENOMEM; + } + if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) { + keys = kmalloc((sizeof(struct tcp4_md5sig_key) * + (tp->md5sig_info->entries4 + 1)), GFP_ATOMIC); + if (!keys) { + kfree(newkey); + tcp_free_md5sig_pool(); + return -ENOMEM; + } + + if (tp->md5sig_info->entries4) + memcpy(keys, tp->md5sig_info->keys4, + (sizeof (struct tcp4_md5sig_key) * + tp->md5sig_info->entries4)); + + /* Free old key list, and reference new one */ + if (tp->md5sig_info->keys4) + kfree(tp->md5sig_info->keys4); + tp->md5sig_info->keys4 = keys; + tp->md5sig_info->alloced4++; + } + tp->md5sig_info->entries4++; + tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr; + tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey; + tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen; + } + return 0; +} + +EXPORT_SYMBOL(tcp_v4_md5_do_add); + +static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, + u8 *newkey, u8 newkeylen) +{ + return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr, + newkey, newkeylen); +} + +int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) +{ + struct tcp_sock *tp = tcp_sk(sk); + int i; + + for (i = 0; i < tp->md5sig_info->entries4; i++) { + if (tp->md5sig_info->keys4[i].addr == addr) { + /* Free the key */ + kfree(tp->md5sig_info->keys4[i].key); + tp->md5sig_info->entries4--; + + if (tp->md5sig_info->entries4 == 0) { + kfree(tp->md5sig_info->keys4); + tp->md5sig_info->keys4 = NULL; + } else { + /* Need to do some manipulation */ + if (tp->md5sig_info->entries4 != i) + memcpy(&tp->md5sig_info->keys4[i], + &tp->md5sig_info->keys4[i+1], + (tp->md5sig_info->entries4 - i) + * sizeof (struct tcp4_md5sig_key)); + } + tcp_free_md5sig_pool(); + return 0; + } + } + return -ENOENT; +} + +EXPORT_SYMBOL(tcp_v4_md5_do_del); + +static void tcp_v4_clear_md5_list (struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + /* Free each key, then the set of key keys, + * the crypto element, and then decrement our + * hold on the last resort crypto. 
+ */ + if (tp->md5sig_info->entries4) { + int i; + for (i = 0; i < tp->md5sig_info->entries4; i++) + kfree(tp->md5sig_info->keys4[i].key); + tp->md5sig_info->entries4 = 0; + tcp_free_md5sig_pool(); + } + if (tp->md5sig_info->keys4) { + kfree(tp->md5sig_info->keys4); + tp->md5sig_info->keys4 = NULL; + tp->md5sig_info->alloced4 = 0; + } +} + +static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval, + int optlen) +{ + struct tcp_md5sig cmd; + struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; + u8 *newkey; + + if (optlen < sizeof(cmd)) + return -EINVAL; + + if (copy_from_user (&cmd, optval, sizeof(cmd))) + return -EFAULT; + + if (sin->sin_family != AF_INET) + return -EINVAL; + + if (!cmd.tcpm_key || !cmd.tcpm_keylen) { + if (!tcp_sk(sk)->md5sig_info) + return -ENOENT; + return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr); + } + + if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) + return -EINVAL; + + if (!tcp_sk(sk)->md5sig_info) { + struct tcp_sock *tp = tcp_sk(sk); + struct tcp_md5sig_info *p; + + p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); + if (!p) + return -EINVAL; + + tp->md5sig_info = p; + + } + + newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL); + if (!newkey) + return -ENOMEM; + memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen); + return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, + newkey, cmd.tcpm_keylen); +} + +static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, + __be32 saddr, __be32 daddr, + struct tcphdr *th, int protocol, + int tcplen) +{ + struct scatterlist sg[4]; + __u16 data_len; + int block = 0; +#ifdef CONFIG_TCP_MD5SIG_DEBUG + int i; +#endif + __u16 old_checksum; + struct tcp_md5sig_pool *hp; + struct tcp4_pseudohdr *bp; + struct hash_desc *desc; + int err; + unsigned int nbytes = 0; + + /* + * Okay, so RFC2385 is turned on for this connection, + * so we need to generate the MD5 hash for the packet now. + */ + + hp = tcp_get_md5sig_pool(); + if (!hp) + goto clear_hash_noput; + + bp = &hp->md5_blk.ip4; + desc = &hp->md5_desc; + + /* + * 1. the TCP pseudo-header (in the order: source IP address, + * destination IP address, zero-padded protocol number, and + * segment length) + */ + bp->saddr = saddr; + bp->daddr = daddr; + bp->pad = 0; + bp->protocol = protocol; + bp->len = htons(tcplen); + sg_set_buf(&sg[block++], bp, sizeof(*bp)); + nbytes += sizeof(*bp); + +#ifdef CONFIG_TCP_MD5SIG_DEBUG + printk("Calcuating hash for: "); + for (i = 0; i < sizeof (*bp); i++) + printk ("%02x ", (unsigned int)((unsigned char *)bp)[i]); + printk(" "); +#endif + + /* 2. the TCP header, excluding options, and assuming a + * checksum of zero/ + */ + old_checksum = th->check; + th->check = 0; + sg_set_buf(&sg[block++], th, sizeof(struct tcphdr)); + nbytes += sizeof(struct tcphdr); +#ifdef CONFIG_TCP_MD5SIG_DEBUG + for (i = 0; i < sizeof (struct tcphdr); i++) + printk (" %02x", (unsigned int)((unsigned char *)th)[i]); +#endif + /* 3. the TCP segment data (if any) */ + data_len = tcplen - (th->doff << 2); + if (data_len > 0) { + unsigned char *data = (unsigned char *)th + (th->doff << 2); + sg_set_buf(&sg[block++], data, data_len); + nbytes += data_len; + } + + /* 4. 
an independently-specified key or password, known to both + * TCPs and presumably connection-specific + */ + sg_set_buf(&sg[block++], key->key, key->keylen); + nbytes += key->keylen; + +#ifdef CONFIG_TCP_MD5SIG_DEBUG + printk (" and password: "); + for (i = 0; i < key->keylen; i++) + printk ("%02x ", (unsigned int)key->key[i]); +#endif + + /* Now store the Hash into the packet */ + err = crypto_hash_init(desc); + if (err) + goto clear_hash; + err = crypto_hash_update(desc, sg, nbytes); + if (err) + goto clear_hash; + err = crypto_hash_final(desc, md5_hash); + if (err) + goto clear_hash; + + /* Reset header, and free up the crypto */ + tcp_put_md5sig_pool(); + th->check = old_checksum; + +out: +#ifdef CONFIG_TCP_MD5SIG_DEBUG + printk(" result:"); + for (i = 0; i < 16; i++) + printk (" %02x", (unsigned int)(((u8*)md5_hash)[i])); + printk("\n"); +#endif + return 0; +clear_hash: + tcp_put_md5sig_pool(); +clear_hash_noput: + memset(md5_hash, 0, 16); + goto out; +} + +int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, + struct sock *sk, + struct dst_entry *dst, + struct request_sock *req, + struct tcphdr *th, int protocol, + int tcplen) +{ + __be32 saddr, daddr; + + if (sk) { + saddr = inet_sk(sk)->saddr; + daddr = inet_sk(sk)->daddr; + } else { + struct rtable *rt = (struct rtable *)dst; + BUG_ON(!rt); + saddr = rt->rt_src; + daddr = rt->rt_dst; + } + return tcp_v4_do_calc_md5_hash(md5_hash, key, + saddr, daddr, + th, protocol, tcplen); +} + +EXPORT_SYMBOL(tcp_v4_calc_md5_hash); + +static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) +{ + /* + * This gets called for each TCP segment that arrives + * so we want to be efficient. + * We have 3 drop cases: + * o No MD5 hash and one expected. + * o MD5 hash and we're not expecting one. + * o MD5 hash and its wrong. + */ + __u8 *hash_location = NULL; + struct tcp_md5sig_key *hash_expected; + struct iphdr *iph = skb->nh.iph; + struct tcphdr *th = skb->h.th; + int length = (th->doff << 2) - sizeof (struct tcphdr); + int genhash; + unsigned char *ptr; + unsigned char newhash[16]; + + hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); + + /* + * If the TCP option length is less than the TCP_MD5SIG + * option length, then we can shortcut + */ + if (length < TCPOLEN_MD5SIG) { + if (hash_expected) + return 1; + else + return 0; + } + + /* Okay, we can't shortcut - we have to grub through the options */ + ptr = (unsigned char *)(th + 1); + while (length > 0) { + int opcode = *ptr++; + int opsize; + + switch (opcode) { + case TCPOPT_EOL: + goto done_opts; + case TCPOPT_NOP: + length--; + continue; + default: + opsize = *ptr++; + if (opsize < 2) + goto done_opts; + if (opsize > length) + goto done_opts; + + if (opcode == TCPOPT_MD5SIG) { + hash_location = ptr; + goto done_opts; + } + } + ptr += opsize-2; + length -= opsize; + } +done_opts: + /* We've parsed the options - do we have a hash? 
*/ + if (!hash_expected && !hash_location) + return 0; + + if (hash_expected && !hash_location) { + if (net_ratelimit()) { + printk(KERN_INFO "MD5 Hash NOT expected but found " + "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", + NIPQUAD (iph->saddr), ntohs(th->source), + NIPQUAD (iph->daddr), ntohs(th->dest)); + } + return 1; + } + + if (!hash_expected && hash_location) { + if (net_ratelimit()) { + printk(KERN_INFO "MD5 Hash NOT expected but found " + "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", + NIPQUAD (iph->saddr), ntohs(th->source), + NIPQUAD (iph->daddr), ntohs(th->dest)); + } + return 1; + } + + /* Okay, so this is hash_expected and hash_location - + * so we need to calculate the checksum. + */ + genhash = tcp_v4_do_calc_md5_hash(newhash, + hash_expected, + iph->saddr, iph->daddr, + th, sk->sk_protocol, + skb->len); + + if (genhash || memcmp(hash_location, newhash, 16) != 0) { + if (net_ratelimit()) { + printk(KERN_INFO "MD5 Hash failed for " + "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n", + NIPQUAD (iph->saddr), ntohs(th->source), + NIPQUAD (iph->daddr), ntohs(th->dest), + genhash ? " tcp_v4_calc_md5_hash failed" : ""); +#ifdef CONFIG_TCP_MD5SIG_DEBUG + do { + int i; + printk("Received: "); + for (i = 0; i < 16; i++) + printk("%02x ", 0xff & (int)hash_location[i]); + printk("\n"); + printk("Calculated: "); + for (i = 0; i < 16; i++) + printk("%02x ", 0xff & (int)newhash[i]); + printk("\n"); + } while(0); +#endif + } + return 1; + } + return 0; +} + +#endif + struct request_sock_ops tcp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct tcp_request_sock), @@ -723,9 +1265,16 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { .send_reset = tcp_v4_send_reset, }; +struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { +#ifdef CONFIG_TCP_MD5SIG + .md5_lookup = tcp_v4_reqsk_md5_lookup, +#endif +}; + static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp_timewait_sock), .twsk_unique = tcp_twsk_unique, + .twsk_destructor= tcp_twsk_destructor, }; int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) @@ -773,6 +1322,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (!req) goto drop; +#ifdef CONFIG_TCP_MD5SIG + tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; +#endif + tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = 536; tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; @@ -891,6 +1444,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *key; +#endif if (sk_acceptq_is_full(sk)) goto exit_overflow; @@ -925,6 +1481,24 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp->advmss = dst_metric(dst, RTAX_ADVMSS); tcp_initialize_rcv_mss(newsk); +#ifdef CONFIG_TCP_MD5SIG + /* Copy over the MD5 key from the original socket */ + if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) { + /* + * We're using one, so create a matching key + * on the newsk structure. If we fail to get + * memory, then we end up not copying the key + * across. Shucks. 
+ */ + char *newkey = kmalloc(key->keylen, GFP_ATOMIC); + if (newkey) { + memcpy(newkey, key->key, key->keylen); + tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr, + newkey, key->keylen); + } + } +#endif + __inet_hash(&tcp_hashinfo, newsk, 0); __inet_inherit_port(&tcp_hashinfo, sk, newsk); @@ -1000,10 +1574,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb) */ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) { + struct sock *rsk; +#ifdef CONFIG_TCP_MD5SIG + /* + * We really want to reject the packet as early as possible + * if: + * o We're expecting an MD5'd packet and this is no MD5 tcp option + * o There is an MD5 option and we're not expecting one + */ + if (tcp_v4_inbound_md5_hash (sk, skb)) + goto discard; +#endif + if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ TCP_CHECK_TIMER(sk); - if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) + if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) { + rsk = sk; goto reset; + } TCP_CHECK_TIMER(sk); return 0; } @@ -1017,20 +1605,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) goto discard; if (nsk != sk) { - if (tcp_child_process(sk, nsk, skb)) + if (tcp_child_process(sk, nsk, skb)) { + rsk = nsk; goto reset; + } return 0; } } TCP_CHECK_TIMER(sk); - if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) + if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) { + rsk = sk; goto reset; + } TCP_CHECK_TIMER(sk); return 0; reset: - tcp_v4_send_reset(skb); + tcp_v4_send_reset(rsk, skb); discard: kfree_skb(skb); /* Be careful here. If this function gets more complicated and @@ -1139,7 +1731,7 @@ no_tcp_socket: bad_packet: TCP_INC_STATS_BH(TCP_MIB_INERRS); } else { - tcp_v4_send_reset(skb); + tcp_v4_send_reset(NULL, skb); } discard_it: @@ -1262,6 +1854,15 @@ struct inet_connection_sock_af_ops ipv4_specific = { #endif }; +struct tcp_sock_af_ops tcp_sock_ipv4_specific = { +#ifdef CONFIG_TCP_MD5SIG + .md5_lookup = tcp_v4_md5_lookup, + .calc_md5_hash = tcp_v4_calc_md5_hash, + .md5_add = tcp_v4_md5_add_func, + .md5_parse = tcp_v4_parse_md5_keys, +#endif +}; + /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ @@ -1301,6 +1902,9 @@ static int tcp_v4_init_sock(struct sock *sk) icsk->icsk_af_ops = &ipv4_specific; icsk->icsk_sync_mss = tcp_sync_mss; +#ifdef CONFIG_TCP_MD5SIG + tp->af_specific = &tcp_sock_ipv4_specific; +#endif sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; @@ -1324,6 +1928,15 @@ int tcp_v4_destroy_sock(struct sock *sk) /* Cleans up our, hopefully empty, out_of_order_queue. */ __skb_queue_purge(&tp->out_of_order_queue); +#ifdef CONFIG_TCP_MD5SIG + /* Clean up the MD5 key list, if any */ + if (tp->md5sig_info) { + tcp_v4_clear_md5_list(sk); + kfree(tp->md5sig_info); + tp->md5sig_info = NULL; + } +#endif + #ifdef CONFIG_NET_DMA /* Cleans up our sk_async_wait_queue */ __skb_queue_purge(&sk->sk_async_wait_queue); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0163d9826907..ac55d8892cf1 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tw->tw_ipv6only = np->ipv6only; } #endif + +#ifdef CONFIG_TCP_MD5SIG + /* + * The timewait bucket does not have the key DB from the + * sock structure. We just make a quick copy of the + * md5 key being used (if indeed we are using one) + * so the timewait ack generating code has the key. 
+ */ + do { + struct tcp_md5sig_key *key; + memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); + tcptw->tw_md5_keylen = 0; + key = tp->af_specific->md5_lookup(sk, sk); + if (key != NULL) { + memcpy(&tcptw->tw_md5_key, key->key, key->keylen); + tcptw->tw_md5_keylen = key->keylen; + if (tcp_alloc_md5sig_pool() == NULL) + BUG(); + } + } while(0); +#endif + /* Linkage updates. */ __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); @@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tcp_done(sk); } +void tcp_twsk_destructor(struct sock *sk) +{ + struct tcp_timewait_sock *twsk = tcp_twsk(sk); +#ifdef CONFIG_TCP_MD5SIG + if (twsk->tw_md5_keylen) + tcp_put_md5sig_pool(); +#endif +} + +EXPORT_SYMBOL_GPL(tcp_twsk_destructor); + /* This is not only more efficient than what we used to do, it eliminates * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM * @@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->rx_opt.ts_recent_stamp = 0; newtp->tcp_header_len = sizeof(struct tcphdr); } +#ifdef CONFIG_TCP_MD5SIG + newtp->md5sig_info = NULL; /*XXX*/ + if (newtp->af_specific->md5_lookup(sk, newsk)) + newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; +#endif if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; newtp->rx_opt.mss_clamp = req->mss; @@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, req, NULL); if (child == NULL) goto listen_overflow; +#ifdef CONFIG_TCP_MD5SIG + else { + /* Copy over the MD5 key from the original socket */ + struct tcp_md5sig_key *key; + struct tcp_sock *tp = tcp_sk(sk); + key = tp->af_specific->md5_lookup(sk, child); + if (key != NULL) { + /* + * We're using one, so create a matching key on the + * newsk structure. If we fail to get memory then we + * end up not copying the key across. Shucks. + */ + char *newkey = kmalloc(key->keylen, GFP_ATOMIC); + if (newkey) { + if (!tcp_alloc_md5sig_pool()) + BUG(); + memcpy(newkey, key->key, key->keylen); + tp->af_specific->md5_add(child, child, + newkey, + key->keylen); + } + } + } +#endif inet_csk_reqsk_queue_unlink(sk, req, prev); inet_csk_reqsk_queue_removed(sk, req); @@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, embryonic_reset: NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); if (!(flg & TCP_FLAG_RST)) - req->rsk_ops->send_reset(skb); + req->rsk_ops->send_reset(sk, skb); inet_csk_reqsk_queue_drop(sk, req, prev); return NULL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6a8581ab9a23..32c1a972fa31 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk) } static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, - __u32 tstamp) + __u32 tstamp, __u8 **md5_hash) { if (tp->rx_opt.tstamp_ok) { *ptr++ = htonl((TCPOPT_NOP << 24) | @@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, tp->rx_opt.eff_sacks--; } } +#ifdef CONFIG_TCP_MD5SIG + if (md5_hash) { + *ptr++ = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | + TCPOLEN_MD5SIG); + *md5_hash = (__u8 *)ptr; + } +#endif } /* Construct a tcp options header for a SYN or SYN_ACK packet. * If this is every changed make sure to change the definition of * MAX_SYN_SIZE to match the new maximum number of options that you * can generate. 
+ * + * Note - that with the RFC2385 TCP option, we make room for the + * 16 byte MD5 hash. This will be filled in later, so the pointer for the + * location to be filled is passed back up. */ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, int offer_wscale, int wscale, __u32 tstamp, - __u32 ts_recent) + __u32 ts_recent, __u8 **md5_hash) { /* We always get an MSS option. * The option bytes which will be seen in normal data @@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale)); +#ifdef CONFIG_TCP_MD5SIG + /* + * If MD5 is enabled, then we set the option, and include the size + * (always 18). The actual MD5 hash is added just before the + * packet is sent. + */ + if (md5_hash) { + *ptr++ = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | + TCPOLEN_MD5SIG); + *md5_hash = (__u8 *) ptr; + } +#endif } /* This routine actually transmits TCP packets queued in by @@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, struct tcp_sock *tp; struct tcp_skb_cb *tcb; int tcp_header_size; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *md5; + __u8 *md5_hash_location; +#endif struct tcphdr *th; int sysctl_flags; int err; @@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (tcp_packets_in_flight(tp) == 0) tcp_ca_event(sk, CA_EVENT_TX_START); +#ifdef CONFIG_TCP_MD5SIG + /* + * Are we doing MD5 on this segment? If so - make + * room for it. + */ + md5 = tp->af_specific->md5_lookup(sk, sk); + if (md5) + tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; +#endif + th = (struct tcphdr *) skb_push(skb, tcp_header_size); skb->h.th = th; @@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, (sysctl_flags & SYSCTL_FLAG_WSCALE), tp->rx_opt.rcv_wscale, tcb->when, - tp->rx_opt.ts_recent); + tp->rx_opt.ts_recent, + +#ifdef CONFIG_TCP_MD5SIG + md5 ? &md5_hash_location : +#endif + NULL); } else { tcp_build_and_update_options((__be32 *)(th + 1), - tp, tcb->when); + tp, tcb->when, +#ifdef CONFIG_TCP_MD5SIG + md5 ? &md5_hash_location : +#endif + NULL); TCP_ECN_send(sk, tp, skb, tcp_header_size); } +#ifdef CONFIG_TCP_MD5SIG + /* Calculate the MD5 hash, as we have all we need now */ + if (md5) { + tp->af_specific->calc_md5_hash(md5_hash_location, + md5, + sk, NULL, NULL, + skb->h.th, + sk->sk_protocol, + skb->len); + } +#endif + icsk->icsk_af_ops->send_check(sk, skb->len, skb); if (likely(tcb->flags & TCPCB_FLAG_ACK)) @@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); +#ifdef CONFIG_TCP_MD5SIG + if (tp->af_specific->md5_lookup(sk, sk)) + mss_now -= TCPOLEN_MD5SIG_ALIGNED; +#endif + xmit_size_goal = mss_now; if (doing_tso) { @@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct tcphdr *th; int tcp_header_size; struct sk_buff *skb; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *md5; + __u8 *md5_hash_location; +#endif skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); if (skb == NULL) @@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + /* SACK_PERM is in the place of NOP NOP of TS */ ((ireq->sack_ok && !ireq->tstamp_ok) ? 
TCPOLEN_SACKPERM_ALIGNED : 0)); + +#ifdef CONFIG_TCP_MD5SIG + /* Are we doing MD5 on this segment? If so - make room for it */ + md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); + if (md5) + tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; +#endif skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); memset(th, 0, sizeof(struct tcphdr)); @@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, TCP_SKB_CB(skb)->when, - req->ts_recent); + req->ts_recent, + ( +#ifdef CONFIG_TCP_MD5SIG + md5 ? &md5_hash_location : +#endif + NULL) + ); skb->csum = 0; th->doff = (tcp_header_size >> 2); TCP_INC_STATS(TCP_MIB_OUTSEGS); + +#ifdef CONFIG_TCP_MD5SIG + /* Okay, we have all we need - do the md5 hash if needed */ + if (md5) { + tp->af_specific->calc_md5_hash(md5_hash_location, + md5, + NULL, dst, req, + skb->h.th, sk->sk_protocol, + skb->len); + } +#endif + return skb; } @@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk) tp->tcp_header_len = sizeof(struct tcphdr) + (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); +#ifdef CONFIG_TCP_MD5SIG + if (tp->af_specific->md5_lookup(sk, sk) != NULL) + tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; +#endif + /* If user gave his TCP_MAXSEG, record it to clamp */ if (tp->rx_opt.user_mss) tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9a88395a7629..663d1d238014 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -66,10 +66,13 @@ #include #include +#include +#include + /* Socket used for sending RSTs and ACKs */ static struct socket *tcp6_socket; -static void tcp_v6_send_reset(struct sk_buff *skb); +static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb); @@ -78,6 +81,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); static struct inet_connection_sock_af_ops ipv6_mapped; static struct inet_connection_sock_af_ops ipv6_specific; +static struct tcp_sock_af_ops tcp_sock_ipv6_specific; +static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; static int tcp_v6_get_port(struct sock *sk, unsigned short snum) { @@ -208,6 +213,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, icsk->icsk_af_ops = &ipv6_mapped; sk->sk_backlog_rcv = tcp_v4_do_rcv; +#ifdef CONFIG_TCP_MD5SIG + tp->af_specific = &tcp_sock_ipv6_mapped_specific; +#endif err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); @@ -215,6 +223,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, icsk->icsk_ext_hdr_len = exthdrlen; icsk->icsk_af_ops = &ipv6_specific; sk->sk_backlog_rcv = tcp_v6_do_rcv; +#ifdef CONFIG_TCP_MD5SIG + tp->af_specific = &tcp_sock_ipv6_specific; +#endif goto failure; } else { ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), @@ -518,6 +529,396 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req) kfree_skb(inet6_rsk(req)->pktopts); } +#ifdef CONFIG_TCP_MD5SIG +static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, + struct in6_addr *addr) +{ + struct tcp_sock *tp = tcp_sk(sk); + int i; + + BUG_ON(tp == NULL); + + if (!tp->md5sig_info || !tp->md5sig_info->entries6) + return NULL; + + for (i = 0; i < tp->md5sig_info->entries6; i++) { + if 
(ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) + return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; + } + return NULL; +} + +static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, + struct sock *addr_sk) +{ + return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr); +} + +static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk, + struct request_sock *req) +{ + return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); +} + +static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, + char *newkey, u8 newkeylen) +{ + /* Add key to the list */ + struct tcp6_md5sig_key *key; + struct tcp_sock *tp = tcp_sk(sk); + struct tcp6_md5sig_key *keys; + + key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); + if (key) { + /* modify existing entry - just update that one */ + kfree(key->key); + key->key = newkey; + key->keylen = newkeylen; + } else { + /* reallocate new list if current one is full. */ + if (!tp->md5sig_info) { + tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); + if (!tp->md5sig_info) { + kfree(newkey); + return -ENOMEM; + } + } + tcp_alloc_md5sig_pool(); + if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { + keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * + (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); + + if (!keys) { + tcp_free_md5sig_pool(); + kfree(newkey); + return -ENOMEM; + } + + if (tp->md5sig_info->entries6) + memmove(keys, tp->md5sig_info->keys6, + (sizeof (tp->md5sig_info->keys6[0]) * + tp->md5sig_info->entries6)); + + kfree(tp->md5sig_info->keys6); + tp->md5sig_info->keys6 = keys; + tp->md5sig_info->alloced6++; + } + + ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, + peer); + tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; + tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; + + tp->md5sig_info->entries6++; + } + return 0; +} + +static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk, + u8 *newkey, __u8 newkeylen) +{ + return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr, + newkey, newkeylen); +} + +static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) +{ + struct tcp_sock *tp = tcp_sk(sk); + int i; + + for (i = 0; i < tp->md5sig_info->entries6; i++) { + if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { + /* Free the key */ + kfree(tp->md5sig_info->keys6[i].key); + tp->md5sig_info->entries6--; + + if (tp->md5sig_info->entries6 == 0) { + kfree(tp->md5sig_info->keys6); + tp->md5sig_info->keys6 = NULL; + + tcp_free_md5sig_pool(); + + return 0; + } else { + /* shrink the database */ + if (tp->md5sig_info->entries6 != i) + memmove(&tp->md5sig_info->keys6[i], + &tp->md5sig_info->keys6[i+1], + (tp->md5sig_info->entries6 - i) + * sizeof (tp->md5sig_info->keys6[0])); + } + } + } + return -ENOENT; +} + +static void tcp_v6_clear_md5_list (struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + int i; + + if (tp->md5sig_info->entries6) { + for (i = 0; i < tp->md5sig_info->entries6; i++) + kfree(tp->md5sig_info->keys6[i].key); + tp->md5sig_info->entries6 = 0; + tcp_free_md5sig_pool(); + } + + kfree(tp->md5sig_info->keys6); + tp->md5sig_info->keys6 = NULL; + tp->md5sig_info->alloced6 = 0; + + if (tp->md5sig_info->entries4) { + for (i = 0; i < tp->md5sig_info->entries4; i++) + kfree(tp->md5sig_info->keys4[i].key); + tp->md5sig_info->entries4 = 0; + tcp_free_md5sig_pool(); + } + + kfree(tp->md5sig_info->keys4); + tp->md5sig_info->keys4 = NULL; + tp->md5sig_info->alloced4 = 0; +} + 
+static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, + int optlen) +{ + struct tcp_md5sig cmd; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; + u8 *newkey; + + if (optlen < sizeof(cmd)) + return -EINVAL; + + if (copy_from_user(&cmd, optval, sizeof(cmd))) + return -EFAULT; + + if (sin6->sin6_family != AF_INET6) + return -EINVAL; + + if (!cmd.tcpm_keylen) { + if (!tcp_sk(sk)->md5sig_info) + return -ENOENT; + if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) + return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); + return tcp_v6_md5_do_del(sk, &sin6->sin6_addr); + } + + if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) + return -EINVAL; + + if (!tcp_sk(sk)->md5sig_info) { + struct tcp_sock *tp = tcp_sk(sk); + struct tcp_md5sig_info *p; + + p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); + if (!p) + return -ENOMEM; + + tp->md5sig_info = p; + } + + newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL); + if (!newkey) + return -ENOMEM; + memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen); + if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) { + return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3], + newkey, cmd.tcpm_keylen); + } + return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen); +} + +static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, + struct in6_addr *saddr, + struct in6_addr *daddr, + struct tcphdr *th, int protocol, + int tcplen) +{ + struct scatterlist sg[4]; + __u16 data_len; + int block = 0; + __u16 cksum; + struct tcp_md5sig_pool *hp; + struct tcp6_pseudohdr *bp; + struct hash_desc *desc; + int err; + unsigned int nbytes = 0; + + hp = tcp_get_md5sig_pool(); + if (!hp) { + printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__); + goto clear_hash_noput; + } + bp = &hp->md5_blk.ip6; + desc = &hp->md5_desc; + + /* 1. TCP pseudo-header (RFC2460) */ + ipv6_addr_copy(&bp->saddr, saddr); + ipv6_addr_copy(&bp->daddr, daddr); + bp->len = htonl(tcplen); + bp->protocol = htonl(protocol); + + sg_set_buf(&sg[block++], bp, sizeof(*bp)); + nbytes += sizeof(*bp); + + /* 2. TCP header, excluding options */ + cksum = th->check; + th->check = 0; + sg_set_buf(&sg[block++], th, sizeof(*th)); + nbytes += sizeof(*th); + + /* 3. TCP segment data (if any) */ + data_len = tcplen - (th->doff << 2); + if (data_len > 0) { + u8 *data = (u8 *)th + (th->doff << 2); + sg_set_buf(&sg[block++], data, data_len); + nbytes += data_len; + } + + /* 4. 
shared key */ + sg_set_buf(&sg[block++], key->key, key->keylen); + nbytes += key->keylen; + + /* Now store the hash into the packet */ + err = crypto_hash_init(desc); + if (err) { + printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__); + goto clear_hash; + } + err = crypto_hash_update(desc, sg, nbytes); + if (err) { + printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__); + goto clear_hash; + } + err = crypto_hash_final(desc, md5_hash); + if (err) { + printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__); + goto clear_hash; + } + + /* Reset header, and free up the crypto */ + tcp_put_md5sig_pool(); + th->check = cksum; +out: + return 0; +clear_hash: + tcp_put_md5sig_pool(); +clear_hash_noput: + memset(md5_hash, 0, 16); + goto out; +} + +static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, + struct sock *sk, + struct dst_entry *dst, + struct request_sock *req, + struct tcphdr *th, int protocol, + int tcplen) +{ + struct in6_addr *saddr, *daddr; + + if (sk) { + saddr = &inet6_sk(sk)->saddr; + daddr = &inet6_sk(sk)->daddr; + } else { + saddr = &inet6_rsk(req)->loc_addr; + daddr = &inet6_rsk(req)->rmt_addr; + } + return tcp_v6_do_calc_md5_hash(md5_hash, key, + saddr, daddr, + th, protocol, tcplen); +} + +static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) +{ + __u8 *hash_location = NULL; + struct tcp_md5sig_key *hash_expected; + struct ipv6hdr *ip6h = skb->nh.ipv6h; + struct tcphdr *th = skb->h.th; + int length = (th->doff << 2) - sizeof (*th); + int genhash; + u8 *ptr; + u8 newhash[16]; + + hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); + + /* If the TCP option is too short, we can short cut */ + if (length < TCPOLEN_MD5SIG) + return hash_expected ? 1 : 0; + + /* parse options */ + ptr = (u8*)(th + 1); + while (length > 0) { + int opcode = *ptr++; + int opsize; + + switch(opcode) { + case TCPOPT_EOL: + goto done_opts; + case TCPOPT_NOP: + length--; + continue; + default: + opsize = *ptr++; + if (opsize < 2 || opsize > length) + goto done_opts; + if (opcode == TCPOPT_MD5SIG) { + hash_location = ptr; + goto done_opts; + } + } + ptr += opsize - 2; + length -= opsize; + } + +done_opts: + /* do we have a hash as expected? */ + if (!hash_expected) { + if (!hash_location) + return 0; + if (net_ratelimit()) { + printk(KERN_INFO "MD5 Hash NOT expected but found " + "(" NIP6_FMT ", %u)->" + "(" NIP6_FMT ", %u)\n", + NIP6(ip6h->saddr), ntohs(th->source), + NIP6(ip6h->daddr), ntohs(th->dest)); + } + return 1; + } + + if (!hash_location) { + if (net_ratelimit()) { + printk(KERN_INFO "MD5 Hash expected but NOT found " + "(" NIP6_FMT ", %u)->" + "(" NIP6_FMT ", %u)\n", + NIP6(ip6h->saddr), ntohs(th->source), + NIP6(ip6h->daddr), ntohs(th->dest)); + } + return 1; + } + + /* check the signature */ + genhash = tcp_v6_do_calc_md5_hash(newhash, + hash_expected, + &ip6h->saddr, &ip6h->daddr, + th, sk->sk_protocol, + skb->len); + if (genhash || memcmp(hash_location, newhash, 16) != 0) { + if (net_ratelimit()) { + printk(KERN_INFO "MD5 Hash %s for " + "(" NIP6_FMT ", %u)->" + "(" NIP6_FMT ", %u)\n", + genhash ? 
"failed" : "mismatch", + NIP6(ip6h->saddr), ntohs(th->source), + NIP6(ip6h->daddr), ntohs(th->dest)); + } + return 1; + } + return 0; +} +#endif + static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .family = AF_INET6, .obj_size = sizeof(struct tcp6_request_sock), @@ -527,9 +928,16 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .send_reset = tcp_v6_send_reset }; +struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { +#ifdef CONFIG_TCP_MD5SIG + .md5_lookup = tcp_v6_reqsk_md5_lookup, +#endif +}; + static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), .twsk_unique = tcp_twsk_unique, + .twsk_destructor= tcp_twsk_destructor, }; static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) @@ -566,11 +974,15 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) return 0; } -static void tcp_v6_send_reset(struct sk_buff *skb) +static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = skb->h.th, *t1; struct sk_buff *buff; struct flowi fl; + int tot_len = sizeof(*th); +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *key; +#endif if (th->rst) return; @@ -578,25 +990,35 @@ static void tcp_v6_send_reset(struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) return; +#ifdef CONFIG_TCP_MD5SIG + if (sk) + key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr); + else + key = NULL; + + if (key) + tot_len += TCPOLEN_MD5SIG_ALIGNED; +#endif + /* * We need to grab some memory, and put together an RST, * and then put it into the queue to be sent. */ - buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr), + buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, GFP_ATOMIC); if (buff == NULL) return; - skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)); + skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); - t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr)); + t1 = (struct tcphdr *) skb_push(buff, tot_len); /* Swap the send and the receive. 
*/ memset(t1, 0, sizeof(*t1)); t1->dest = th->source; t1->source = th->dest; - t1->doff = sizeof(*t1)/4; + t1->doff = tot_len / 4; t1->rst = 1; if(th->ack) { @@ -607,6 +1029,22 @@ static void tcp_v6_send_reset(struct sk_buff *skb) + skb->len - (th->doff<<2)); } +#ifdef CONFIG_TCP_MD5SIG + if (key) { + u32 *opt = (u32*)(t1 + 1); + opt[0] = htonl((TCPOPT_NOP << 24) | + (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | + TCPOLEN_MD5SIG); + tcp_v6_do_calc_md5_hash((__u8*)&opt[1], + key, + &skb->nh.ipv6h->daddr, + &skb->nh.ipv6h->saddr, + t1, IPPROTO_TCP, + tot_len); + } +#endif + buff->csum = csum_partial((char *)t1, sizeof(*t1), 0); memset(&fl, 0, sizeof(fl)); @@ -637,15 +1075,37 @@ static void tcp_v6_send_reset(struct sk_buff *skb) kfree_skb(buff); } -static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) +static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, + struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) { struct tcphdr *th = skb->h.th, *t1; struct sk_buff *buff; struct flowi fl; int tot_len = sizeof(struct tcphdr); + u32 *topt; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *key; + struct tcp_md5sig_key tw_key; +#endif + +#ifdef CONFIG_TCP_MD5SIG + if (!tw && skb->sk) { + key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr); + } else if (tw && tw->tw_md5_keylen) { + tw_key.key = tw->tw_md5_key; + tw_key.keylen = tw->tw_md5_keylen; + key = &tw_key; + } else { + key = NULL; + } +#endif if (ts) tot_len += TCPOLEN_TSTAMP_ALIGNED; +#ifdef CONFIG_TCP_MD5SIG + if (key) + tot_len += TCPOLEN_MD5SIG_ALIGNED; +#endif buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, GFP_ATOMIC); @@ -665,15 +1125,29 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 t1->ack_seq = htonl(ack); t1->ack = 1; t1->window = htons(win); + + topt = (u32*)(t1 + 1); if (ts) { - u32 *ptr = (u32*)(t1 + 1); - *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | - (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); - *ptr++ = htonl(tcp_time_stamp); - *ptr = htonl(ts); + *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | + (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); + *topt++ = htonl(tcp_time_stamp); + *topt = htonl(ts); } +#ifdef CONFIG_TCP_MD5SIG + if (key) { + *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | + (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); + tcp_v6_do_calc_md5_hash((__u8 *)topt, + key, + &skb->nh.ipv6h->daddr, + &skb->nh.ipv6h->saddr, + t1, IPPROTO_TCP, + tot_len); + } +#endif + buff->csum = csum_partial((char *)t1, tot_len, 0); memset(&fl, 0, sizeof(fl)); @@ -704,9 +1178,9 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); - const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); + struct tcp_timewait_sock *tcptw = tcp_twsk(sk); - tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, + tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); @@ -715,7 +1189,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) { - tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); + tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); } @@ -786,6 +1260,10 @@ static int tcp_v6_conn_request(struct 
sock *sk, struct sk_buff *skb) if (req == NULL) goto drop; +#ifdef CONFIG_TCP_MD5SIG + tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops; +#endif + tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); tmp_opt.user_mss = tp->rx_opt.user_mss; @@ -844,6 +1322,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct tcp_sock *newtp; struct sock *newsk; struct ipv6_txoptions *opt; +#ifdef CONFIG_TCP_MD5SIG + struct tcp_md5sig_key *key; +#endif if (skb->protocol == htons(ETH_P_IP)) { /* @@ -874,6 +1355,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; +#ifdef CONFIG_TCP_MD5SIG + newtp->af_specific = &tcp_sock_ipv6_mapped_specific; +#endif + newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); @@ -1008,6 +1493,23 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; +#ifdef CONFIG_TCP_MD5SIG + /* Copy over the MD5 key from the original socket */ + if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) { + /* We're using one, so create a matching key + * on the newsk structure. If we fail to get + * memory, then we end up not copying the key + * across. Shucks. + */ + char *newkey = kmalloc(key->keylen, GFP_ATOMIC); + if (newkey) { + memcpy(newkey, key->key, key->keylen); + tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr, + newkey, key->keylen); + } + } +#endif + __inet6_hash(&tcp_hashinfo, newsk); inet_inherit_port(&tcp_hashinfo, sk, newsk); @@ -1067,6 +1569,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); +#ifdef CONFIG_TCP_MD5SIG + if (tcp_v6_inbound_md5_hash (sk, skb)) + goto discard; +#endif + if (sk_filter(sk, skb)) goto discard; @@ -1132,7 +1639,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; reset: - tcp_v6_send_reset(skb); + tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); @@ -1257,7 +1764,7 @@ no_tcp_socket: bad_packet: TCP_INC_STATS_BH(TCP_MIB_INERRS); } else { - tcp_v6_send_reset(skb); + tcp_v6_send_reset(NULL, skb); } discard_it: @@ -1336,6 +1843,15 @@ static struct inet_connection_sock_af_ops ipv6_specific = { #endif }; +static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { +#ifdef CONFIG_TCP_MD5SIG + .md5_lookup = tcp_v6_md5_lookup, + .calc_md5_hash = tcp_v6_calc_md5_hash, + .md5_add = tcp_v6_md5_add_func, + .md5_parse = tcp_v6_parse_md5_keys, +#endif +}; + /* * TCP over IPv4 via INET6 API */ @@ -1358,6 +1874,15 @@ static struct inet_connection_sock_af_ops ipv6_mapped = { #endif }; +static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { +#ifdef CONFIG_TCP_MD5SIG + .md5_lookup = tcp_v4_md5_lookup, + .calc_md5_hash = tcp_v4_calc_md5_hash, + .md5_add = tcp_v6_md5_add_func, + .md5_parse = tcp_v6_parse_md5_keys, +#endif +}; + /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. 
*/ @@ -1397,6 +1922,10 @@ static int tcp_v6_init_sock(struct sock *sk) sk->sk_write_space = sk_stream_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); +#ifdef CONFIG_TCP_MD5SIG + tp->af_specific = &tcp_sock_ipv6_specific; +#endif + sk->sk_sndbuf = sysctl_tcp_wmem[1]; sk->sk_rcvbuf = sysctl_tcp_rmem[1]; @@ -1407,6 +1936,11 @@ static int tcp_v6_init_sock(struct sock *sk) static int tcp_v6_destroy_sock(struct sock *sk) { +#ifdef CONFIG_TCP_MD5SIG + /* Clean up the MD5 key list */ + if (tcp_sk(sk)->md5sig_info) + tcp_v6_clear_md5_list(sk); +#endif tcp_v4_destroy_sock(sk); return inet6_destroy_sock(sk); } -- cgit v1.2.3 From 714e85be3557222bc25f69c252326207c900a7db Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 14 Nov 2006 20:51:49 -0800 Subject: [IPV6]: Assorted trivial endianness annotations. Signed-off-by: Al Viro Signed-off-by: David S. Miller --- include/linux/inetdevice.h | 14 ++++++++------ include/net/arp.h | 2 +- include/net/ip.h | 11 ++++++----- net/ipv4/af_inet.c | 6 +++--- net/ipv4/arp.c | 2 +- net/ipv4/cipso_ipv4.c | 8 ++++---- net/ipv4/devinet.c | 10 +++++----- net/ipv4/ip_output.c | 2 +- net/ipv4/ip_sockglue.c | 2 +- net/ipv4/raw.c | 4 ++-- net/ipv4/route.c | 4 ++-- net/ipv4/syncookies.c | 18 +++++++++--------- net/ipv4/tcp_ipv4.c | 10 +++++----- net/ipv4/tcp_minisocks.c | 2 +- net/ipv4/tcp_timer.c | 2 +- 15 files changed, 50 insertions(+), 47 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 5a0ab04627bc..c0f7aec331c2 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -124,12 +124,13 @@ static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) * Check if a mask is acceptable. */ -static __inline__ int bad_mask(u32 mask, u32 addr) +static __inline__ int bad_mask(__be32 mask, __be32 addr) { + __u32 hmask; if (addr & (mask = ~mask)) return 1; - mask = ntohl(mask); - if (mask & (mask+1)) + hmask = ntohl(mask); + if (hmask & (hmask+1)) return 1; return 0; } @@ -190,11 +191,12 @@ static __inline__ __be32 inet_make_mask(int logmask) return 0; } -static __inline__ int inet_mask_len(__u32 mask) +static __inline__ int inet_mask_len(__be32 mask) { - if (!(mask = ntohl(mask))) + __u32 hmask = ntohl(mask); + if (!hmask) return 0; - return 32 - ffz(~mask); + return 32 - ffz(~hmask); } diff --git a/include/net/arp.h b/include/net/arp.h index 6a3d9a7d302b..f02664568600 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -16,7 +16,7 @@ extern void arp_send(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th); extern int arp_bind_neighbour(struct dst_entry *dst); -extern int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir); +extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); extern void arp_ifdown(struct net_device *dev); extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, diff --git a/include/net/ip.h b/include/net/ip.h index 949fa8683626..412e8114667d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -123,7 +123,7 @@ extern int ip4_datagram_connect(struct sock *sk, * multicast packets. */ -static inline void ip_tr_mc_map(u32 addr, char *buf) +static inline void ip_tr_mc_map(__be32 addr, char *buf) { buf[0]=0xC0; buf[1]=0x00; @@ -238,9 +238,9 @@ static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst * Map a multicast IP onto multicast MAC for type ethernet. 
*/ -static inline void ip_eth_mc_map(u32 addr, char *buf) +static inline void ip_eth_mc_map(__be32 naddr, char *buf) { - addr=ntohl(addr); + __u32 addr=ntohl(naddr); buf[0]=0x01; buf[1]=0x00; buf[2]=0x5e; @@ -256,13 +256,14 @@ static inline void ip_eth_mc_map(u32 addr, char *buf) * Leave P_Key as 0 to be filled in by driver. */ -static inline void ip_ib_mc_map(u32 addr, char *buf) +static inline void ip_ib_mc_map(__be32 naddr, char *buf) { + __u32 addr; buf[0] = 0; /* Reserved */ buf[1] = 0xff; /* Multicast QPN */ buf[2] = 0xff; buf[3] = 0xff; - addr = ntohl(addr); + addr = ntohl(naddr); buf[4] = 0xff; buf[5] = 0x12; /* link local scope */ buf[6] = 0x40; /* IPv4 signature */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8db39f7e3bf0..1144900d37f6 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -644,7 +644,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, sin->sin_port = inet->dport; sin->sin_addr.s_addr = inet->daddr; } else { - __u32 addr = inet->rcv_saddr; + __be32 addr = inet->rcv_saddr; if (!addr) addr = inet->saddr; sin->sin_port = inet->sport; @@ -995,8 +995,8 @@ static int inet_sk_reselect_saddr(struct sock *sk) struct inet_sock *inet = inet_sk(sk); int err; struct rtable *rt; - __u32 old_saddr = inet->saddr; - __u32 new_saddr; + __be32 old_saddr = inet->saddr; + __be32 new_saddr; __be32 daddr = inet->daddr; if (inet->opt && inet->opt->srr) diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index cfb5d3de9c84..3981e8be9ab8 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -203,7 +203,7 @@ struct neigh_table arp_tbl = { .gc_thresh3 = 1024, }; -int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir) +int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) { switch (dev->type) { case ARPHRD_ETHER: diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 6460233407c7..c3a61ebbadef 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -966,7 +966,7 @@ static int cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; - *(u32 *)&buf[2] = htonl(doi_def->doi); + *(__be32 *)&buf[2] = htonl(doi_def->doi); return 0; } @@ -1140,7 +1140,7 @@ int cipso_v4_validate(unsigned char **option) } rcu_read_lock(); - doi_def = cipso_v4_doi_getdef(ntohl(*((u32 *)&opt[2]))); + doi_def = cipso_v4_doi_getdef(ntohl(*((__be32 *)&opt[2]))); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; @@ -1370,7 +1370,7 @@ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) if (ret_val == 0) return ret_val; - doi = ntohl(*(u32 *)&cipso_ptr[2]); + doi = ntohl(*(__be32 *)&cipso_ptr[2]); rcu_read_lock(); doi_def = cipso_v4_doi_getdef(doi); if (doi_def == NULL) { @@ -1436,7 +1436,7 @@ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, if (cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr) == 0) return 0; - doi = ntohl(*(u32 *)&cipso_ptr[2]); + doi = ntohl(*(__be32 *)&cipso_ptr[2]); rcu_read_lock(); doi_def = cipso_v4_doi_getdef(doi); if (doi_def == NULL) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f38cbbae0ae3..a1b356c8aa59 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -577,20 +577,20 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg * Determine a default network mask, based on the IP address. */ -static __inline__ int inet_abc_len(u32 addr) +static __inline__ int inet_abc_len(__be32 addr) { int rc = -1; /* Something else, probably a multicast. 
*/ if (ZERONET(addr)) rc = 0; else { - addr = ntohl(addr); + __u32 haddr = ntohl(addr); - if (IN_CLASSA(addr)) + if (IN_CLASSA(haddr)) rc = 8; - else if (IN_CLASSB(addr)) + else if (IN_CLASSB(haddr)) rc = 16; - else if (IN_CLASSC(addr)) + else if (IN_CLASSC(haddr)) rc = 24; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index def32d8d3b06..90942a384a45 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -341,7 +341,7 @@ packet_routed: /* OK, we know where to send it, allocate and build IP header. */ iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); - *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); + *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); iph->tot_len = htons(skb->len); if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) iph->frag_off = htons(IP_DF); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 4b132953bcc2..57d4bae6f080 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -355,7 +355,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) sin = (struct sockaddr_in *)msg->msg_name; if (sin) { sin->sin_family = AF_INET; - sin->sin_addr.s_addr = *(u32*)(skb->nh.raw + serr->addr_offset); + sin->sin_addr.s_addr = *(__be32*)(skb->nh.raw + serr->addr_offset); sin->sin_port = serr->port; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 5c31dead2bdc..a6c63bbd9ddb 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -854,8 +854,8 @@ static void raw_seq_stop(struct seq_file *seq, void *v) static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i) { struct inet_sock *inet = inet_sk(sp); - unsigned int dest = inet->daddr, - src = inet->rcv_saddr; + __be32 dest = inet->daddr, + src = inet->rcv_saddr; __u16 destp = 0, srcp = inet->num; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d7152b2b2c64..ee00b6506ab4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -566,8 +566,8 @@ static inline u32 rt_score(struct rtable *rt) static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) { - return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | - (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | + return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | + (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | (fl1->mark ^ fl2->mark) | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) | diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 661e0a4bca72..6b19530905af 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -35,23 +35,23 @@ module_init(init_syncookies); #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static u32 cookie_hash(u32 saddr, u32 daddr, u32 sport, u32 dport, +static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) { __u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS]; memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); - tmp[0] = saddr; - tmp[1] = daddr; - tmp[2] = (sport << 16) + dport; + tmp[0] = (__force u32)saddr; + tmp[1] = (__force u32)daddr; + tmp[2] = ((__force u32)sport << 16) + (__force u32)dport; tmp[3] = count; sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); return tmp[17]; } -static __u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport, - __u16 dport, __u32 sseq, __u32 count, +static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport, + __be16 dport, __u32 
sseq, __u32 count, __u32 data) { /* @@ -80,8 +80,8 @@ static __u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport, * "maxdiff" if the current (passed-in) "count". The return value * is (__u32)-1 if this test fails. */ -static __u32 check_tcp_syn_cookie(__u32 cookie, __u32 saddr, __u32 daddr, - __u16 sport, __u16 dport, __u32 sseq, +static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, + __be16 sport, __be16 dport, __u32 sseq, __u32 count, __u32 maxdiff) { __u32 diff; @@ -220,7 +220,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, } ireq = inet_rsk(req); treq = tcp_rsk(req); - treq->rcv_isn = htonl(skb->h.th->seq) - 1; + treq->rcv_isn = ntohl(skb->h.th->seq) - 1; treq->snt_isn = cookie; req->mss = mss; ireq->rmt_port = skb->h.th->source; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8c8e8112f98d..0ca8dead03b0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -542,7 +542,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) struct { struct tcphdr th; #ifdef CONFIG_TCP_MD5SIG - u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; + __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; #endif } rep; struct ip_reply_arg arg; @@ -618,9 +618,9 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, struct tcphdr *th = skb->h.th; struct { struct tcphdr th; - u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) + __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) #ifdef CONFIG_TCP_MD5SIG - + (TCPOLEN_MD5SIG_ALIGNED >> 2) + + (TCPOLEN_MD5SIG_ALIGNED >> 2) #endif ]; } rep; @@ -2333,8 +2333,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); struct inet_sock *inet = inet_sk(sp); - unsigned int dest = inet->daddr; - unsigned int src = inet->rcv_saddr; + __be32 dest = inet->daddr; + __be32 src = inet->rcv_saddr; __u16 destp = ntohs(inet->dport); __u16 srcp = ntohs(inet->sport); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4a0ee901a888..383cb38461c5 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -493,7 +493,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, struct request_sock **prev) { struct tcphdr *th = skb->h.th; - u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); + __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); int paws_reject = 0; struct tcp_options_received tmp_opt; struct sock *child; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index fb09ade5897b..3355c276b611 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -297,7 +297,7 @@ static void tcp_retransmit_timer(struct sock *sk) if (net_ratelimit()) { struct inet_sock *inet = inet_sk(sk); printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n", - NIPQUAD(inet->daddr), htons(inet->dport), + NIPQUAD(inet->daddr), ntohs(inet->dport), inet->num, tp->snd_una, tp->snd_nxt); } #endif -- cgit v1.2.3 From b51655b958dfb1176bfcf99466231fdbef8751ff Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 14 Nov 2006 21:40:42 -0800 Subject: [NET]: Annotate __skb_checksum_complete() and friends. Signed-off-by: Al Viro Signed-off-by: David S. 
Miller --- include/linux/netfilter.h | 6 +++--- include/linux/netfilter_ipv4.h | 2 +- include/linux/netfilter_ipv6.h | 2 +- include/linux/skbuff.h | 2 +- include/net/tcp.h | 2 +- include/net/udp.h | 8 ++++---- net/core/datagram.c | 2 +- net/core/netpoll.c | 4 ++-- net/ipv4/netfilter.c | 4 ++-- net/ipv4/tcp_input.c | 4 ++-- net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/netfilter.c | 4 ++-- net/ipv6/tcp_ipv6.c | 2 +- 13 files changed, 22 insertions(+), 22 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b7e67d1d4382..707bb2e53c4e 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -290,7 +290,7 @@ extern u_int16_t nf_proto_csum_update(struct sk_buff *skb, struct nf_afinfo { unsigned short family; - unsigned int (*checksum)(struct sk_buff *skb, unsigned int hook, + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); void (*saveroute)(const struct sk_buff *skb, struct nf_info *info); @@ -305,12 +305,12 @@ static inline struct nf_afinfo *nf_get_afinfo(unsigned short family) return rcu_dereference(nf_afinfo[family]); } -static inline unsigned int +static inline __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family) { struct nf_afinfo *afinfo; - unsigned int csum = 0; + __sum16 csum = 0; rcu_read_lock(); afinfo = nf_get_afinfo(family); diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 5b63a231a76b..5821eb5a0a3e 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -79,7 +79,7 @@ enum nf_ip_hook_priorities { #ifdef __KERNEL__ extern int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type); extern int ip_xfrm_me_harder(struct sk_buff **pskb); -extern unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, +extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); #endif /*__KERNEL__*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index d97e268cdfe5..ab81a6dc94ea 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -74,7 +74,7 @@ enum nf_ip6_hook_priorities { #ifdef CONFIG_NETFILTER extern int ip6_route_me_harder(struct sk_buff *skb); -extern unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, +extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); extern int ipv6_netfilter_init(void); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 874ca029fbb9..41753667541d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1398,7 +1398,7 @@ static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval * extern void __net_timestamp(struct sk_buff *skb); -extern unsigned int __skb_checksum_complete(struct sk_buff *skb); +extern __sum16 __skb_checksum_complete(struct sk_buff *skb); /** * skb_checksum_complete - Calculate checksum of an entire packet diff --git a/include/net/tcp.h b/include/net/tcp.h index 826aaecdb994..aa7989c53791 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -814,7 +814,7 @@ static inline __sum16 tcp_v4_check(struct tcphdr *th, int len, return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base); } -static inline int __tcp_checksum_complete(struct sk_buff *skb) +static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) { return 
__skb_checksum_complete(skb); } diff --git a/include/net/udp.h b/include/net/udp.h index 39e825a6909a..c5ccd9a3387b 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -69,15 +69,15 @@ struct sk_buff; /* * Generic checksumming routines for UDP(-Lite) v4 and v6 */ -static inline u16 __udp_lib_checksum_complete(struct sk_buff *skb) +static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) { if (! UDP_SKB_CB(skb)->partial_cov) return __skb_checksum_complete(skb); - return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov, - skb->csum)); + return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov, + skb->csum)); } -static __inline__ int udp_lib_checksum_complete(struct sk_buff *skb) +static inline __sum16 udp_lib_checksum_complete(struct sk_buff *skb) { return skb->ip_summed != CHECKSUM_UNNECESSARY && __udp_lib_checksum_complete(skb); diff --git a/net/core/datagram.c b/net/core/datagram.c index 0d9c9bac4006..797fdd4352ce 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -411,7 +411,7 @@ fault: return -EFAULT; } -unsigned int __skb_checksum_complete(struct sk_buff *skb) +__sum16 __skb_checksum_complete(struct sk_buff *skb) { __sum16 sum; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 26ee1791aa02..8be3681d3d80 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -77,8 +77,8 @@ static void queue_process(void *p) } } -static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, - unsigned short ulen, __be32 saddr, __be32 daddr) +static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, + unsigned short ulen, __be32 saddr, __be32 daddr) { __wsum psum; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index b797a37c01ce..a68966059b50 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -162,11 +162,11 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info) return 0; } -unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, +__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { struct iphdr *iph = skb->nh.iph; - unsigned int csum = 0; + __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6ab3423674bb..9304034c0c47 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3790,9 +3790,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) return err; } -static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) +static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { - int result; + __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0ca8dead03b0..dadf80272413 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1544,7 +1544,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) return sk; } -static int tcp_v4_checksum_init(struct sk_buff *skb) +static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 8d1b542806c1..f6294e5bcb31 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -80,11 +80,11 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info) return 0; } -unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, +__sum16 nf_ip6_checksum(struct 
sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { struct ipv6hdr *ip6h = skb->nh.ipv6h; - unsigned int csum = 0; + __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 394bc54c5c21..147ce499f509 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1527,7 +1527,7 @@ out: return NULL; } -static int tcp_v6_checksum_init(struct sk_buff *skb) +static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, -- cgit v1.2.3 From 7174259e6ced15bebee202983511d8fc950e929f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 17 Nov 2006 10:57:30 -0200 Subject: [TCP_IPV4]: CodingStyle cleanups, no code change Mostly related to CONFIG_TCP_MD5SIG recent merge. Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/tcp_ipv4.c | 143 +++++++++++++++++++++++++++------------------------- 1 file changed, 75 insertions(+), 68 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index dadf80272413..010dff442a11 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -93,16 +93,18 @@ static struct socket *tcp_socket; void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); #ifdef CONFIG_TCP_MD5SIG -static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr); +static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, + __be32 addr); static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, - __be32 saddr, __be32 daddr, struct tcphdr *th, - int protocol, int tcplen); + __be32 saddr, __be32 daddr, + struct tcphdr *th, int protocol, + int tcplen); #endif struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { - .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), - .lhash_users = ATOMIC_INIT(0), - .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), + .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), + .lhash_users = ATOMIC_INIT(0), + .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), }; static int tcp_v4_get_port(struct sock *sk, unsigned short snum) @@ -215,13 +217,14 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { struct inet_peer *peer = rt_get_peer(rt); - - /* VJ's idea. We save last timestamp seen from - * the destination in peer table, when entering state TIME-WAIT - * and initialize rx_opt.ts_recent from it, when trying new connection. + /* + * VJ's idea. We save last timestamp seen from + * the destination in peer table, when entering state + * TIME-WAIT * and initialize rx_opt.ts_recent from it, + * when trying new connection. 
*/ - - if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { + if (peer != NULL && + peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } @@ -246,7 +249,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (err) goto failure; - err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk); + err = ip_route_newports(&rt, IPPROTO_TCP, + inet->sport, inet->dport, sk); if (err) goto failure; @@ -270,7 +274,10 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) return 0; failure: - /* This unhashes the socket and releases the local port, if necessary. */ + /* + * This unhashes the socket and releases the local port, + * if necessary. + */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; @@ -495,7 +502,8 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) struct tcphdr *th = skb->h.th; if (skb->ip_summed == CHECKSUM_PARTIAL) { - th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0); + th->check = ~tcp_v4_check(th, len, + inet->saddr, inet->daddr, 0); skb->csum = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, @@ -572,7 +580,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) skb->len - (th->doff << 2)); } - memset(&arg, 0, sizeof arg); + memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); @@ -595,9 +603,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.iov[0].iov_len); } #endif - arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, - skb->nh.iph->saddr, /*XXX*/ + skb->nh.iph->saddr, /* XXX */ sizeof(struct tcphdr), IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; @@ -631,7 +638,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, #endif memset(&rep.th, 0, sizeof(struct tcphdr)); - memset(&arg, 0, sizeof arg); + memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); @@ -668,9 +675,8 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, tw_key.key = twsk->tw_md5_key; tw_key.keylen = twsk->tw_md5_keylen; key = &tw_key; - } else { + } else key = NULL; - } if (key) { int offset = (ts) ? 3 : 0; @@ -690,9 +696,8 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, arg.iov[0].iov_len); } #endif - arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, - skb->nh.iph->saddr, /*XXX*/ + skb->nh.iph->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; @@ -707,12 +712,14 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, - tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); + tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, + tcptw->tw_ts_recent); inet_twsk_put(tw); } -static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) +static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, + struct request_sock *req) { tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, @@ -809,7 +816,8 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk, */ /* Find the Key structure for an address. 
*/ -static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) +static struct tcp_md5sig_key * + tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) { struct tcp_sock *tp = tcp_sk(sk); int i; @@ -818,7 +826,8 @@ static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) return NULL; for (i = 0; i < tp->md5sig_info->entries4; i++) { if (tp->md5sig_info->keys4[i].addr == addr) - return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i]; + return (struct tcp_md5sig_key *) + &tp->md5sig_info->keys4[i]; } return NULL; } @@ -915,13 +924,12 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) if (tp->md5sig_info->entries4 == 0) { kfree(tp->md5sig_info->keys4); tp->md5sig_info->keys4 = NULL; - } else { + } else if (tp->md5sig_info->entries4 != i) { /* Need to do some manipulation */ - if (tp->md5sig_info->entries4 != i) - memcpy(&tp->md5sig_info->keys4[i], - &tp->md5sig_info->keys4[i+1], - (tp->md5sig_info->entries4 - i) - * sizeof (struct tcp4_md5sig_key)); + memcpy(&tp->md5sig_info->keys4[i], + &tp->md5sig_info->keys4[i+1], + (tp->md5sig_info->entries4 - i) * + sizeof(struct tcp4_md5sig_key)); } tcp_free_md5sig_pool(); return 0; @@ -932,7 +940,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) EXPORT_SYMBOL(tcp_v4_md5_do_del); -static void tcp_v4_clear_md5_list (struct sock *sk) +static void tcp_v4_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -954,8 +962,8 @@ static void tcp_v4_clear_md5_list (struct sock *sk) } } -static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval, - int optlen) +static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, + int optlen) { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; @@ -964,7 +972,7 @@ static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval, if (optlen < sizeof(cmd)) return -EINVAL; - if (copy_from_user (&cmd, optval, sizeof(cmd))) + if (copy_from_user(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin->sin_family != AF_INET) @@ -981,9 +989,8 @@ static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval, if (!tcp_sk(sk)->md5sig_info) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_md5sig_info *p; + struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL); - p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); if (!p) return -EINVAL; @@ -1044,8 +1051,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, #ifdef CONFIG_TCP_MD5SIG_DEBUG printk("Calcuating hash for: "); - for (i = 0; i < sizeof (*bp); i++) - printk ("%02x ", (unsigned int)((unsigned char *)bp)[i]); + for (i = 0; i < sizeof(*bp); i++) + printk("%02x ", (unsigned int)((unsigned char *)bp)[i]); printk(" "); #endif @@ -1057,8 +1064,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, sg_set_buf(&sg[block++], th, sizeof(struct tcphdr)); nbytes += sizeof(struct tcphdr); #ifdef CONFIG_TCP_MD5SIG_DEBUG - for (i = 0; i < sizeof (struct tcphdr); i++) - printk (" %02x", (unsigned int)((unsigned char *)th)[i]); + for (i = 0; i < sizeof(struct tcphdr); i++) + printk(" %02x", (unsigned int)((unsigned char *)th)[i]); #endif /* 3. 
the TCP segment data (if any) */ data_len = tcplen - (th->doff << 2); @@ -1075,9 +1082,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, nbytes += key->keylen; #ifdef CONFIG_TCP_MD5SIG_DEBUG - printk (" and password: "); + printk(" and password: "); for (i = 0; i < key->keylen; i++) - printk ("%02x ", (unsigned int)key->key[i]); + printk("%02x ", (unsigned int)key->key[i]); #endif /* Now store the Hash into the packet */ @@ -1099,7 +1106,7 @@ out: #ifdef CONFIG_TCP_MD5SIG_DEBUG printk(" result:"); for (i = 0; i < 16; i++) - printk (" %02x", (unsigned int)(((u8*)md5_hash)[i])); + printk(" %02x", (unsigned int)(((u8*)md5_hash)[i])); printk("\n"); #endif return 0; @@ -1135,7 +1142,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, EXPORT_SYMBOL(tcp_v4_calc_md5_hash); -static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) +static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) { /* * This gets called for each TCP segment that arrives @@ -1149,7 +1156,7 @@ static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) struct tcp_md5sig_key *hash_expected; struct iphdr *iph = skb->nh.iph; struct tcphdr *th = skb->h.th; - int length = (th->doff << 2) - sizeof (struct tcphdr); + int length = (th->doff << 2) - sizeof(struct tcphdr); int genhash; unsigned char *ptr; unsigned char newhash[16]; @@ -1200,22 +1207,18 @@ done_opts: return 0; if (hash_expected && !hash_location) { - if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash NOT expected but found " + LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found " "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", - NIPQUAD (iph->saddr), ntohs(th->source), - NIPQUAD (iph->daddr), ntohs(th->dest)); - } + NIPQUAD(iph->saddr), ntohs(th->source), + NIPQUAD(iph->daddr), ntohs(th->dest)); return 1; } if (!hash_expected && hash_location) { - if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash NOT expected but found " + LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found " "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", - NIPQUAD (iph->saddr), ntohs(th->source), - NIPQUAD (iph->daddr), ntohs(th->dest)); - } + NIPQUAD(iph->saddr), ntohs(th->source), + NIPQUAD(iph->daddr), ntohs(th->dest)); return 1; } @@ -1232,15 +1235,16 @@ done_opts: if (net_ratelimit()) { printk(KERN_INFO "MD5 Hash failed for " "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n", - NIPQUAD (iph->saddr), ntohs(th->source), - NIPQUAD (iph->daddr), ntohs(th->dest), + NIPQUAD(iph->saddr), ntohs(th->source), + NIPQUAD(iph->daddr), ntohs(th->dest), genhash ? 
" tcp_v4_calc_md5_hash failed" : ""); #ifdef CONFIG_TCP_MD5SIG_DEBUG do { int i; printk("Received: "); for (i = 0; i < 16; i++) - printk("%02x ", 0xff & (int)hash_location[i]); + printk("%02x ", + 0xff & (int)hash_location[i]); printk("\n"); printk("Calculated: "); for (i = 0; i < 16; i++) @@ -1582,7 +1586,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) * o We're expecting an MD5'd packet and this is no MD5 tcp option * o There is an MD5 option and we're not expecting one */ - if (tcp_v4_inbound_md5_hash (sk, skb)) + if (tcp_v4_inbound_md5_hash(sk, skb)) goto discard; #endif @@ -2155,7 +2159,7 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos) while (rc && pos) { rc = established_get_next(seq, rc); --pos; - } + } return rc; } @@ -2284,7 +2288,7 @@ int tcp_proc_register(struct tcp_seq_afinfo *afinfo) afinfo->seq_fops->read = seq_read; afinfo->seq_fops->llseek = seq_lseek; afinfo->seq_fops->release = seq_release_private; - + p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); if (p) p->data = afinfo; @@ -2298,7 +2302,7 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo) if (!afinfo) return; proc_net_remove(afinfo->name); - memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); + memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); } static void get_openreq4(struct sock *sk, struct request_sock *req, @@ -2356,7 +2360,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) "%08X %5d %8d %lu %d %p %u %u %u %u %d", i, src, srcp, dest, destp, sp->sk_state, tp->write_seq - tp->snd_una, - (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), + sp->sk_state == TCP_LISTEN ? sp->sk_ack_backlog : + (tp->rcv_nxt - tp->copied_seq), timer_active, jiffies_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, @@ -2371,7 +2376,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); } -static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i) +static void get_timewait4_sock(struct inet_timewait_sock *tw, + char *tmpbuf, int i) { __be32 dest, src; __u16 destp, srcp; @@ -2484,7 +2490,8 @@ struct proto tcp_prot = { void __init tcp_v4_init(struct net_proto_family *ops) { - if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, IPPROTO_TCP) < 0) + if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, + IPPROTO_TCP) < 0) panic("Failed to create the TCP control socket.\n"); } -- cgit v1.2.3 From f6685938f9181e95f814edfca287d4f04a925240 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 17 Nov 2006 11:06:01 -0200 Subject: [TCP_IPV4]: Use kmemdup where appropriate Also use a variable to avoid the longish tp->md5sig_info-> use in tcp_v4_md5_do_add. 
Code diff stats: [acme@newtoy net-2.6.20]$ codiff /tmp/tcp_ipv4.o.before /tmp/tcp_ipv4.o.after /pub/scm/linux/kernel/git/acme/net-2.6.20/net/ipv4/tcp_ipv4.c: tcp_v4_md5_do_add | -62 tcp_v4_syn_recv_sock | -32 tcp_v4_parse_md5_keys | -86 3 functions changed, 180 bytes removed [acme@newtoy net-2.6.20]$ Signed-off-by: Arnaldo Carvalho de Melo --- net/ipv4/tcp_ipv4.c | 49 +++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 24 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 010dff442a11..b7d5522092eb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -855,15 +855,18 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, struct tcp_sock *tp = tcp_sk(sk); struct tcp4_md5sig_key *keys; - key = (struct tcp4_md5sig_key *) tcp_v4_md5_do_lookup(sk, addr); + key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); if (key) { /* Pre-existing entry - just update that one. */ - kfree (key->key); + kfree(key->key); key->key = newkey; key->keylen = newkeylen; } else { + struct tcp_md5sig_info *md5sig; + if (!tp->md5sig_info) { - tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); + tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), + GFP_ATOMIC); if (!tp->md5sig_info) { kfree(newkey); return -ENOMEM; @@ -873,30 +876,31 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, kfree(newkey); return -ENOMEM; } - if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) { - keys = kmalloc((sizeof(struct tcp4_md5sig_key) * - (tp->md5sig_info->entries4 + 1)), GFP_ATOMIC); + md5sig = tp->md5sig_info; + + if (md5sig->alloced4 == md5sig->entries4) { + keys = kmalloc((sizeof(*keys) * + (md5sig->entries4 + 1)), GFP_ATOMIC); if (!keys) { kfree(newkey); tcp_free_md5sig_pool(); return -ENOMEM; } - if (tp->md5sig_info->entries4) - memcpy(keys, tp->md5sig_info->keys4, - (sizeof (struct tcp4_md5sig_key) * - tp->md5sig_info->entries4)); + if (md5sig->entries4) + memcpy(keys, md5sig->keys4, + sizeof(*keys) * md5sig->entries4); /* Free old key list, and reference new one */ - if (tp->md5sig_info->keys4) - kfree(tp->md5sig_info->keys4); - tp->md5sig_info->keys4 = keys; - tp->md5sig_info->alloced4++; + if (md5sig->keys4) + kfree(md5sig->keys4); + md5sig->keys4 = keys; + md5sig->alloced4++; } - tp->md5sig_info->entries4++; - tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr; - tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey; - tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen; + md5sig->entries4++; + md5sig->keys4[md5sig->entries4 - 1].addr = addr; + md5sig->keys4[md5sig->entries4 - 1].key = newkey; + md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; } return 0; } @@ -998,10 +1002,9 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, } - newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL); + newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); if (!newkey) return -ENOMEM; - memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen); return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, newkey, cmd.tcpm_keylen); } @@ -1494,12 +1497,10 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, * memory, then we end up not copying the key * across. Shucks. 
*/ - char *newkey = kmalloc(key->keylen, GFP_ATOMIC); - if (newkey) { - memcpy(newkey, key->key, key->keylen); + char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); + if (newkey != NULL) tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr, newkey, key->keylen); - } } #endif -- cgit v1.2.3 From 8e5200f54062b8af0ed1d186ea0f113854786d89 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Nov 2006 18:06:37 -0800 Subject: [NET]: Fix assorted misannotations (from md5 and udplite merges). Signed-off-by: Al Viro Signed-off-by: David S. Miller --- include/net/tcp.h | 2 +- include/net/udp.h | 2 +- include/net/udplite.h | 4 ++-- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/udp.c | 4 ++-- net/ipv6/tcp_ipv6.c | 4 ++-- net/ipv6/udp.c | 2 +- net/ipv6/udplite.c | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/include/net/tcp.h b/include/net/tcp.h index aa7989c53791..c99774f15eba 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1154,7 +1154,7 @@ extern int tcp_v4_md5_do_add(struct sock *sk, u8 newkeylen); extern int tcp_v4_md5_do_del(struct sock *sk, - u32 addr); + __be32 addr); extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void); extern void tcp_free_md5sig_pool(void); diff --git a/include/net/udp.h b/include/net/udp.h index c5ccd9a3387b..eac69ff0582c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -77,7 +77,7 @@ static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) skb->csum)); } -static inline __sum16 udp_lib_checksum_complete(struct sk_buff *skb) +static inline int udp_lib_checksum_complete(struct sk_buff *skb) { return skb->ip_summed != CHECKSUM_UNNECESSARY && __udp_lib_checksum_complete(skb); diff --git a/include/net/udplite.h b/include/net/udplite.h index 3abaab7b78c6..67ac51424307 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -90,9 +90,9 @@ static __inline__ int udplite6_csum_init(struct sk_buff *skb, struct udphdr *uh) int rc = udplite_checksum_init(skb, uh); if (!rc) - skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, + skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, - skb->len, IPPROTO_UDPLITE, 0); + skb->len, IPPROTO_UDPLITE, 0)); return rc; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b7d5522092eb..e9d467124c4d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1020,7 +1020,7 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, #ifdef CONFIG_TCP_MD5SIG_DEBUG int i; #endif - __u16 old_checksum; + __sum16 old_checksum; struct tcp_md5sig_pool *hp; struct tcp4_pseudohdr *bp; struct hash_desc *desc; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7eb76fbf1b4b..28e4cf662ce0 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -114,7 +114,7 @@ DEFINE_RWLOCK(udp_hash_lock); static int udp_port_rover; -static inline int __udp_lib_lport_inuse(__be16 num, struct hlist_head udptable[]) +static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[]) { struct sock *sk; struct hlist_node *node; @@ -455,7 +455,7 @@ static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up) struct sk_buff *skb; struct udphdr *uh; int err = 0; - u32 csum = 0; + __wsum csum = 0; /* Grab the skbuff where UDP header space exists. 
*/ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d2170da77e5b..0adb337c4b7e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -739,7 +739,7 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, struct scatterlist sg[4]; __u16 data_len; int block = 0; - __u16 cksum; + __sum16 cksum; struct tcp_md5sig_pool *hp; struct tcp6_pseudohdr *bp; struct hash_desc *desc; @@ -1032,7 +1032,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) #ifdef CONFIG_TCP_MD5SIG if (key) { - u32 *opt = (u32*)(t1 + 1); + __be32 *opt = (__be32*)(t1 + 1); opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index efa8950ddd30..b3ea8af50a9b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -248,7 +248,7 @@ out: static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, int type, - int code, int offset, __u32 info ) + int code, int offset, __be32 info ) { return __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash); } diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index e0ec5e63004a..d4cafacc235b 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -24,7 +24,7 @@ static __inline__ int udplitev6_rcv(struct sk_buff **pskb) static __inline__ void udplitev6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - int type, int code, int offset, __u32 info) + int type, int code, int offset, __be32 info) { return __udp6_lib_err(skb, opt, type, code, offset, info, udplite_hash); } -- cgit v1.2.3 From ff1dcadb1b55dbf471c5ed109dbbdf06bd19ef3b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Nov 2006 18:07:29 -0800 Subject: [NET]: Split skb->csum ... into anonymous union of __wsum and __u32 (csum and csum_offset resp.) Signed-off-by: Al Viro Signed-off-by: David S. 
Miller --- drivers/net/cassini.c | 2 +- drivers/net/e1000/e1000_main.c | 2 +- drivers/net/ixgb/ixgb_main.c | 2 +- drivers/net/myri10ge/myri10ge.c | 2 +- drivers/net/sk98lin/skge.c | 4 ++-- drivers/net/skge.c | 2 +- drivers/net/sky2.c | 2 +- drivers/net/sungem.c | 2 +- drivers/net/sunhme.c | 2 +- include/linux/skbuff.h | 5 ++++- net/core/dev.c | 4 ++-- net/core/skbuff.c | 2 +- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv4/udp.c | 2 +- net/ipv6/tcp_ipv6.c | 4 ++-- 15 files changed, 22 insertions(+), 19 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 521c5b71023c..fd2cc13f7d97 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2825,7 +2825,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, u64 csum_start_off, csum_stuff_off; csum_start_off = (u64) (skb->h.raw - skb->data); - csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data); + csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = TX_DESC_CSUM_EN | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 7a0828869ecf..32dde0adb683 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2826,7 +2826,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index e09f575a3a38..7b127212e62b 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1249,7 +1249,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { struct ixgb_buffer *buffer_info; css = skb->h.raw - skb->data; - cso = (skb->h.raw + skb->csum) - skb->data; + cso = css + skb->csum_offset; i = adapter->tx_ring.next_to_use; context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 806081b59733..36350e6db1c1 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -1955,7 +1955,7 @@ again: flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { cksum_offset = (skb->h.raw - skb->data); - pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data; + pseudo_hdr_offset = cksum_offset + skb->csum_offset; /* If the headers are excessively large, then we must * fall back to a software checksum */ if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) { diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index a5d41ebc9fb4..12cbfd190dd7 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -1562,7 +1562,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ if (pMessage->ip_summed == CHECKSUM_PARTIAL) { u16 hdrlen = pMessage->h.raw - pMessage->data; - u16 offset = hdrlen + pMessage->csum; + u16 offset = hdrlen + pMessage->csum_offset; if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) && (pAC->GIni.GIChipRev == 0) && @@ -1681,7 +1681,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ */ if (pMessage->ip_summed == 
CHECKSUM_PARTIAL) { u16 hdrlen = pMessage->h.raw - pMessage->data; - u16 offset = hdrlen + pMessage->csum; + u16 offset = hdrlen + pMessage->csum_offset; Control = BMU_STFWD; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 27b537c8d5e3..5513907e8393 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2565,7 +2565,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) td->csum_offs = 0; td->csum_start = offset; - td->csum_write = offset + skb->csum; + td->csum_write = offset + skb->csum_offset; } else control = BMU_CHECK; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 0ef1848b9761..842abd9396c6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1350,7 +1350,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) u32 tcpsum; tcpsum = offset << 16; /* sum start */ - tcpsum |= offset + skb->csum; /* sum write */ + tcpsum |= offset + skb->csum_offset; /* sum write */ ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; if (skb->nh.iph->protocol == IPPROTO_UDP) diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 253e96e7ad20..334c6cfd6595 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -1030,7 +1030,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) u64 csum_start_off, csum_stuff_off; csum_start_off = (u64) (skb->h.raw - skb->data); - csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data); + csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = (TXDCTRL_CENAB | (csum_start_off << 15) | diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 9d7cd130c19d..ec432ea879fb 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2272,7 +2272,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) u32 csum_start_off, csum_stuff_off; csum_start_off = (u32) (skb->h.raw - skb->data); - csum_stuff_off = (u32) ((skb->h.raw + skb->csum) - skb->data); + csum_stuff_off = csum_start_off + skb->csum_offset; tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fcab543d79ac..14ec16d2d9ba 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -274,7 +274,10 @@ struct sk_buff { unsigned int len, data_len, mac_len; - __wsum csum; + union { + __wsum csum; + __u32 csum_offset; + }; __u32 priority; __u8 local_df:1, cloned:1, diff --git a/net/core/dev.c b/net/core/dev.c index 1a36b17f4b51..59d058a3b504 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1191,9 +1191,9 @@ int skb_checksum_help(struct sk_buff *skb) offset = skb->tail - skb->h.raw; BUG_ON(offset <= 0); - BUG_ON(skb->csum + 2 > offset); + BUG_ON(skb->csum_offset + 2 > offset); - *(__sum16*)(skb->h.raw + skb->csum) = csum_fold(csum); + *(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 07c25d601922..a90bc439488e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1414,7 +1414,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) skb->len - csstart, 0); if (skb->ip_summed == CHECKSUM_PARTIAL) { - long csstuff = csstart + skb->csum; + long csstuff = csstart + skb->csum_offset; *((__sum16 *)(to + csstuff)) = csum_fold(csum); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e9d467124c4d..4913f25e5ad5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -504,7 +504,7 @@ void tcp_v4_send_check(struct sock *sk, 
int len, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0); - skb->csum = offsetof(struct tcphdr, check); + skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, csum_partial((char *)th, @@ -526,7 +526,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) th->check = 0; th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); - skb->csum = offsetof(struct tcphdr, check); + skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; return 0; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 28e4cf662ce0..1807a30694d9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -422,7 +422,7 @@ static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, /* * Only one fragment on the socket. */ - skb->csum = offsetof(struct udphdr, check); + skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); } else { /* diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 0adb337c4b7e..517c50024bfc 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -948,7 +948,7 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); - skb->csum = offsetof(struct tcphdr, check); + skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, csum_partial((char *)th, th->doff<<2, @@ -970,7 +970,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) th->check = 0; th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, IPPROTO_TCP, 0); - skb->csum = offsetof(struct tcphdr, check); + skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; return 0; } -- cgit v1.2.3 From 08dd1a506bbc4528db60dfdfff61423a1608ed3f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 30 Nov 2006 16:35:01 -0800 Subject: [TCP] MD5SIG: Kill CONFIG_TCP_MD5SIG_DEBUG. It just obfuscates the code and adds limited value. And as Adrian Bunk noticed, it lacked Kconfig help text too, so just kill it. Signed-off-by: David S. Miller --- net/ipv4/Kconfig | 4 ---- net/ipv4/tcp_ipv4.c | 41 +---------------------------------------- 2 files changed, 1 insertion(+), 44 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 39e0cb763588..503e7059e312 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -630,9 +630,5 @@ config TCP_MD5SIG If unsure, say N. 
-config TCP_MD5SIG_DEBUG - bool "TCP: MD5 Signature Option debugging" - depends on TCP_MD5SIG - source "net/ipv4/ipvs/Kconfig" diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4913f25e5ad5..dd3509a59fe0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1017,9 +1017,6 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, struct scatterlist sg[4]; __u16 data_len; int block = 0; -#ifdef CONFIG_TCP_MD5SIG_DEBUG - int i; -#endif __sum16 old_checksum; struct tcp_md5sig_pool *hp; struct tcp4_pseudohdr *bp; @@ -1052,13 +1049,6 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, sg_set_buf(&sg[block++], bp, sizeof(*bp)); nbytes += sizeof(*bp); -#ifdef CONFIG_TCP_MD5SIG_DEBUG - printk("Calcuating hash for: "); - for (i = 0; i < sizeof(*bp); i++) - printk("%02x ", (unsigned int)((unsigned char *)bp)[i]); - printk(" "); -#endif - /* 2. the TCP header, excluding options, and assuming a * checksum of zero/ */ @@ -1066,10 +1056,7 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, th->check = 0; sg_set_buf(&sg[block++], th, sizeof(struct tcphdr)); nbytes += sizeof(struct tcphdr); -#ifdef CONFIG_TCP_MD5SIG_DEBUG - for (i = 0; i < sizeof(struct tcphdr); i++) - printk(" %02x", (unsigned int)((unsigned char *)th)[i]); -#endif + /* 3. the TCP segment data (if any) */ data_len = tcplen - (th->doff << 2); if (data_len > 0) { @@ -1084,12 +1071,6 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, sg_set_buf(&sg[block++], key->key, key->keylen); nbytes += key->keylen; -#ifdef CONFIG_TCP_MD5SIG_DEBUG - printk(" and password: "); - for (i = 0; i < key->keylen; i++) - printk("%02x ", (unsigned int)key->key[i]); -#endif - /* Now store the Hash into the packet */ err = crypto_hash_init(desc); if (err) @@ -1106,12 +1087,6 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, th->check = old_checksum; out: -#ifdef CONFIG_TCP_MD5SIG_DEBUG - printk(" result:"); - for (i = 0; i < 16; i++) - printk(" %02x", (unsigned int)(((u8*)md5_hash)[i])); - printk("\n"); -#endif return 0; clear_hash: tcp_put_md5sig_pool(); @@ -1241,20 +1216,6 @@ done_opts: NIPQUAD(iph->saddr), ntohs(th->source), NIPQUAD(iph->daddr), ntohs(th->dest), genhash ? " tcp_v4_calc_md5_hash failed" : ""); -#ifdef CONFIG_TCP_MD5SIG_DEBUG - do { - int i; - printk("Received: "); - for (i = 0; i < 16; i++) - printk("%02x ", - 0xff & (int)hash_location[i]); - printk("\n"); - printk("Calculated: "); - for (i = 0; i < 16; i++) - printk("%02x ", 0xff & (int)newhash[i]); - printk("\n"); - } while(0); -#endif } return 1; } -- cgit v1.2.3 From f5b99bcdddfb2338227faad3489c24907f37ee8e Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Thu, 30 Nov 2006 17:22:29 -0800 Subject: [NET]: Possible cleanups. This patch contains the following possible cleanups: - make the following needlessly global functions statis: - ipv4/tcp.c: __tcp_alloc_md5sig_pool() - ipv4/tcp_ipv4.c: tcp_v4_reqsk_md5_lookup() - ipv4/udplite.c: udplite_rcv() - ipv4/udplite.c: udplite_err() - make the following needlessly global structs static: - ipv4/tcp_ipv4.c: tcp_request_sock_ipv4_ops - ipv4/tcp_ipv4.c: tcp_sock_ipv4_specific - ipv6/tcp_ipv6.c: tcp_request_sock_ipv6_ops - net/ipv{4,6}/udplite.c: remove inline's from static functions (gcc should know best when to inline them) Signed-off-by: Adrian Bunk Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_ipv4.c | 8 ++++---- net/ipv4/udplite.c | 10 +++++----- net/ipv6/tcp_ipv6.c | 2 +- net/ipv6/udplite.c | 10 +++++----- 5 files changed, 16 insertions(+), 16 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a6b228914b8e..090c690627e5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2278,7 +2278,7 @@ void tcp_free_md5sig_pool(void) EXPORT_SYMBOL(tcp_free_md5sig_pool); -struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) +static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) { int cpu; struct tcp_md5sig_pool **pool; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index dd3509a59fe0..7684297d80aa 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -840,8 +840,8 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, EXPORT_SYMBOL(tcp_v4_md5_lookup); -struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, - struct request_sock *req) +static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, + struct request_sock *req) { return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); } @@ -1233,7 +1233,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { .send_reset = tcp_v4_send_reset, }; -struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { +static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v4_reqsk_md5_lookup, #endif @@ -1820,7 +1820,7 @@ struct inet_connection_sock_af_ops ipv4_specific = { #endif }; -struct tcp_sock_af_ops tcp_sock_ipv4_specific = { +static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_calc_md5_hash, diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 561de6d8c734..b28fe1edf98b 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -18,23 +18,23 @@ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics) __read_mostly; struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; static int udplite_port_rover; -__inline__ int udplite_get_port(struct sock *sk, unsigned short p, - int (*c)(const struct sock *, const struct sock *)) +int udplite_get_port(struct sock *sk, unsigned short p, + int (*c)(const struct sock *, const struct sock *)) { return __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c); } -static __inline__ int udplite_v4_get_port(struct sock *sk, unsigned short snum) +static int udplite_v4_get_port(struct sock *sk, unsigned short snum) { return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal); } -__inline__ int udplite_rcv(struct sk_buff *skb) +static int udplite_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, udplite_hash, 1); } -__inline__ void udplite_err(struct sk_buff *skb, u32 info) +static void udplite_err(struct sk_buff *skb, u32 info) { return __udp4_lib_err(skb, info, udplite_hash); } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 517c50024bfc..dd1a23b5a0f5 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -929,7 +929,7 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .send_reset = tcp_v6_send_reset }; -struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { +static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v6_reqsk_md5_lookup, #endif diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index d4cafacc235b..629f97162fbc 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -17,14 +17,14 @@ DEFINE_SNMP_STAT(struct 
udp_mib, udplite_stats_in6) __read_mostly; -static __inline__ int udplitev6_rcv(struct sk_buff **pskb) +static int udplitev6_rcv(struct sk_buff **pskb) { return __udp6_lib_rcv(pskb, udplite_hash, 1); } -static __inline__ void udplitev6_err(struct sk_buff *skb, - struct inet6_skb_parm *opt, - int type, int code, int offset, __be32 info) +static void udplitev6_err(struct sk_buff *skb, + struct inet6_skb_parm *opt, + int type, int code, int offset, __be32 info) { return __udp6_lib_err(skb, opt, type, code, offset, info, udplite_hash); } @@ -35,7 +35,7 @@ static struct inet6_protocol udplitev6_protocol = { .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; -static __inline__ int udplite_v6_get_port(struct sock *sk, unsigned short snum) +static int udplite_v6_get_port(struct sock *sk, unsigned short snum) { return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal); } -- cgit v1.2.3 From b6332e6cf9c9198c0f3b0fe37c2c57514dafe1b8 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 30 Nov 2006 19:16:28 -0800 Subject: [TCP]: Fix warnings with TCP_MD5SIG disabled. Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 8 ++++---- net/ipv6/tcp_ipv6.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net/ipv4/tcp_ipv4.c') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 7684297d80aa..a1222d6968c4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1233,11 +1233,11 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { .send_reset = tcp_v4_send_reset, }; -static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { #ifdef CONFIG_TCP_MD5SIG +static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { .md5_lookup = tcp_v4_reqsk_md5_lookup, -#endif }; +#endif static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp_timewait_sock), @@ -1820,14 +1820,14 @@ struct inet_connection_sock_af_ops ipv4_specific = { #endif }; -static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { #ifdef CONFIG_TCP_MD5SIG +static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_calc_md5_hash, .md5_add = tcp_v4_md5_add_func, .md5_parse = tcp_v4_parse_md5_keys, -#endif }; +#endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index dd1a23b5a0f5..c25e930c2c69 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -929,11 +929,11 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .send_reset = tcp_v6_send_reset }; -static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { #ifdef CONFIG_TCP_MD5SIG +static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { .md5_lookup = tcp_v6_reqsk_md5_lookup, -#endif }; +#endif static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), -- cgit v1.2.3
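The warning fix in the last patch follows a small recurring pattern: a static object that is only referenced under a config option should have its whole definition under that option, not just its initializer members, otherwise the disabled configuration is left with an empty initializer and a "defined but not used" warning. A minimal, self-contained sketch of the idea, using hypothetical CONFIG_DEMO_MD5 and demo_* names rather than the kernel's own symbols:

#include <stdio.h>

struct demo_req_ops {
	int (*md5_lookup)(int id);
};

#ifdef CONFIG_DEMO_MD5
static int demo_md5_lookup(int id)
{
	return id;	/* stand-in for a real lookup */
}

/* The whole definition lives under the option, matching its only user below. */
static struct demo_req_ops demo_ipv4_ops = {
	.md5_lookup = demo_md5_lookup,
};
#endif

int main(void)
{
#ifdef CONFIG_DEMO_MD5
	printf("lookup: %d\n", demo_ipv4_ops.md5_lookup(42));
#else
	puts("demo md5 support compiled out");
#endif
	return 0;
}

Built either with or without -DCONFIG_DEMO_MD5, neither configuration emits an unused-object warning, which is the effect the kernel patch is after.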