Diffstat (limited to 'net')

 net/core/dev.c           |  2
 net/core/filter.c        | 41
 net/core/net_namespace.c |  1
 net/core/skbuff.c        | 10
 net/dsa/dsa.c            |  6
 net/ipv4/fou.c           |  3
 net/ipv4/inet_diag.c     | 28
 net/ipv4/tcp.c           | 10
 net/ipv4/tcp_dctcp.c     |  5
 net/ipv4/tcp_illinois.c  |  6
 net/ipv4/tcp_vegas.c     |  5
 net/ipv4/tcp_vegas.h     |  2
 net/ipv4/tcp_westwood.c  |  6
 net/sched/act_bpf.c      |  3
 net/sched/act_mirred.c   |  2
 net/sched/cls_bpf.c      |  3
 16 files changed, 91 insertions(+), 42 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index af4a1b0adc10..1796cef55ab5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2713,7 +2713,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (unlikely(!skb))
goto out_null;
- if (netif_needs_gso(dev, skb, features)) {
+ if (netif_needs_gso(skb, features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
diff --git a/net/core/filter.c b/net/core/filter.c
index b669e75d2b36..bf831a85c315 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1175,12 +1175,27 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
return 0;
}
+/**
+ * bpf_skb_clone_unwritable - is the header of a clone not writable
+ * @skb: buffer to check
+ * @len: length up to which to write, can be negative
+ *
+ * Returns true if modifying the header part of the cloned buffer
+ * requires the data to be copied, i.e. this version handles the
+ * negative lengths needed in the eBPF case.
+ */
+static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
+{
+ return skb_header_cloned(skb) ||
+ (int) skb_headroom(skb) + len > skb->hdr_len;
+}
+
#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
- unsigned int offset = (unsigned int) r2;
+ int offset = (int) r2;
void *from = (void *) (long) r3;
unsigned int len = (unsigned int) r4;
char buf[16];
@@ -1194,10 +1209,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
*
* so check for invalid 'offset' and too large 'len'
*/
- if (unlikely(offset > 0xffff || len > sizeof(buf)))
+ if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
return -EFAULT;
- if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len))
+ offset -= skb->data - skb_mac_header(skb);
+ if (unlikely(skb_cloned(skb) &&
+ bpf_skb_clone_unwritable(skb, offset + len)))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, len, buf);
@@ -1232,15 +1249,18 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
#define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f)
#define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10)
-static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ int offset = (int) r2;
__sum16 sum, *ptr;
- if (unlikely(offset > 0xffff))
+ if (unlikely((u32) offset > 0xffff))
return -EFAULT;
- if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+ offset -= skb->data - skb_mac_header(skb);
+ if (unlikely(skb_cloned(skb) &&
+ bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1276,16 +1296,19 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
+static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+ int offset = (int) r2;
__sum16 sum, *ptr;
- if (unlikely(offset > 0xffff))
+ if (unlikely((u32) offset > 0xffff))
return -EFAULT;
- if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
+ offset -= skb->data - skb_mac_header(skb);
+ if (unlikely(skb_cloned(skb) &&
+ bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
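A note on the offset rebasing that all three helpers above now share:
eBPF programs pass offsets relative to the mac header, but on ingress
skb->data already points past it, so after "offset -= skb->data -
skb_mac_header(skb)" the offset can legitimately go negative, which is
why bpf_skb_clone_unwritable() compares signed values. A minimal
user-space sketch of that signed bound (struct fields and numbers are
illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the relevant skb geometry. */
struct fake_skb {
	int headroom;	/* bytes between skb->head and skb->data */
	int hdr_len;	/* writable (non-shared) header bytes    */
	int mac_gap;	/* skb->data - skb_mac_header(skb)       */
};

/* Mirrors the length test in bpf_skb_clone_unwritable(): the signed
 * addition lets a negative len (a write starting in front of
 * skb->data) be checked correctly where an unsigned test would wrap. */
static bool clone_unwritable(const struct fake_skb *skb, int len)
{
	return skb->headroom + len > skb->hdr_len;
}

int main(void)
{
	struct fake_skb skb = { .headroom = 64, .hdr_len = 78, .mac_gap = 14 };
	int prog_off = 0;			/* mac-header relative */
	int off = prog_off - skb.mac_gap;	/* rebased: -14        */

	printf("rebased offset %d, unwritable=%d\n",
	       off, clone_unwritable(&skb, off + 4));
	return 0;
}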
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index a3abb719221f..78fc04ad36fc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
-#include <linux/rtnetlink.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3b6e5830256e..d1967dab9cc6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4124,19 +4124,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
*/
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
- if (xnet)
- skb_orphan(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
- skb->mark = 0;
skb_sender_cpu_clear(skb);
- skb_init_secmark(skb);
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
+
+ if (!xnet)
+ return;
+
+ skb_orphan(skb);
+ skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
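After the reorder, the split in behaviour is explicit: every scrub
clears the timestamp, packet type, incoming ifindex, dst and conntrack
state, but only a packet that crosses a netns boundary is additionally
orphaned and has its mark cleared. Illustrative call sites (assumed
for the example, not taken from this patch):

	skb_scrub_packet(skb, true);	/* crossing netns: also orphan, mark = 0 */
	skb_scrub_packet(skb, false);	/* same netns: keep owning socket and mark */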
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 5eaadabe23a1..079a224471e7 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -124,7 +124,7 @@ static ssize_t temp1_max_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store);
+static DEVICE_ATTR_RW(temp1_max);
static ssize_t temp1_max_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -159,8 +159,8 @@ static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
if (index == 1) {
if (!drv->get_temp_limit)
mode = 0;
- else if (drv->set_temp_limit)
- mode |= S_IWUSR;
+ else if (!drv->set_temp_limit)
+ mode &= ~S_IWUSR;
} else if (index == 2 && !drv->get_temp_alarm) {
mode = 0;
}
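With DEVICE_ATTR_RW() the attribute is declared 0644 up front, and the
visibility callback now only ever removes S_IWUSR when no setter
exists, instead of adding write permission to a read-only declaration.
Roughly what the macro expands to (a sketch from the generic sysfs
helpers, not part of this patch):

	static struct device_attribute dev_attr_temp1_max =
		__ATTR(temp1_max, 0644, temp1_max_show, temp1_max_store);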
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index af150b43b214..34968cd5c146 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -711,11 +711,10 @@ static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq, NLM_F_MULTI,
skb, FOU_CMD_GET);
if (ret)
- goto done;
+ break;
}
mutex_unlock(&fn->fou_lock);
-done:
cb->args[0] = idx;
return skb->len;
}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 70e8b3c308ec..bb77ebdae3b3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -111,6 +111,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
const struct nlmsghdr *unlh)
{
const struct inet_sock *inet = inet_sk(sk);
+ const struct tcp_congestion_ops *ca_ops;
const struct inet_diag_handler *handler;
int ext = req->idiag_ext;
struct inet_diag_msg *r;
@@ -208,16 +209,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
info = nla_data(attr);
}
- if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
- if (nla_put_string(skb, INET_DIAG_CONG,
- icsk->icsk_ca_ops->name) < 0)
+ if (ext & (1 << (INET_DIAG_CONG - 1))) {
+ int err = 0;
+
+ rcu_read_lock();
+ ca_ops = READ_ONCE(icsk->icsk_ca_ops);
+ if (ca_ops)
+ err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
+ rcu_read_unlock();
+ if (err < 0)
goto errout;
+ }
handler->idiag_get_info(sk, r, info);
- if (sk->sk_state < TCP_TIME_WAIT &&
- icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
- icsk->icsk_ca_ops->get_info(sk, ext, skb);
+ if (sk->sk_state < TCP_TIME_WAIT) {
+ int err = 0;
+
+ rcu_read_lock();
+ ca_ops = READ_ONCE(icsk->icsk_ca_ops);
+ if (ca_ops && ca_ops->get_info)
+ err = ca_ops->get_info(sk, ext, skb);
+ rcu_read_unlock();
+ if (err < 0)
+ goto errout;
+ }
out:
nlmsg_end(skb, nlh);
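Both hunks follow the same RCU reader pattern, needed because
setsockopt(TCP_CONGESTION) can swap icsk_ca_ops concurrently: take
rcu_read_lock(), snapshot the pointer once with READ_ONCE(), and use
only that snapshot. Condensed from the second hunk above:

	rcu_read_lock();
	ca_ops = READ_ONCE(icsk->icsk_ca_ops);		/* one snapshot...  */
	if (ca_ops && ca_ops->get_info)
		err = ca_ops->get_info(sk, ext, skb);	/* ...reused here   */
	rcu_read_unlock();
	if (err < 0)
		goto errout;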
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 18e3a12eb1b2..59c8a027721b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2595,6 +2595,7 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 now = tcp_time_stamp;
+ u32 rate;
memset(info, 0, sizeof(*info));
@@ -2655,10 +2656,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_total_retrans = tp->total_retrans;
- info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
- sk->sk_pacing_rate : ~0ULL;
- info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
- sk->sk_max_pacing_rate : ~0ULL;
+ rate = READ_ONCE(sk->sk_pacing_rate);
+ info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+
+ rate = READ_ONCE(sk->sk_max_pacing_rate);
+ info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
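Taking one READ_ONCE() snapshot into 'rate' matters because
tcp_get_info() runs without the socket lock: with two separate loads,
a concurrent writer could make the != ~0U test and the stored value
disagree. A self-contained user-space model of the single-snapshot
idea (this models the pattern, it does not reproduce the kernel race):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t sk_pacing_rate = 12500000;	/* bytes/sec */

static uint64_t report_pacing_rate(void)
{
	/* Analogue of READ_ONCE(): load once, then reuse the snapshot,
	 * so the comparison and the reported value cannot diverge. */
	uint32_t rate = atomic_load_explicit(&sk_pacing_rate,
					     memory_order_relaxed);

	return rate != UINT32_MAX ? rate : UINT64_MAX;	/* ~0U / ~0ULL */
}

int main(void)
{
	printf("pacing rate: %llu\n",
	       (unsigned long long)report_pacing_rate());
	return 0;
}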
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index b504371af742..4376016f7fa5 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -277,7 +277,7 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
}
}
-static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
const struct dctcp *ca = inet_csk_ca(sk);
@@ -297,8 +297,9 @@ static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
info.dctcp_ab_tot = ca->acked_bytes_total;
}
- nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
+ return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
}
+ return 0;
}
static struct tcp_congestion_ops dctcp __read_mostly = {
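dctcp, illinois, vegas and westwood below all make the same mechanical
change: get_info() now returns the nla_put() result instead of
swallowing it, so a full skb propagates back to the new caller in
inet_diag.c above. The contract, condensed from the hunks:

	/* module side (this file and the three below): */
	return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);

	/* caller side (inet_diag.c): an error, typically -EMSGSIZE,
	 * now unwinds the dump instead of silently truncating it. */
	err = ca_ops->get_info(sk, ext, skb);
	if (err < 0)
		goto errout;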
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 1d5a30a90adf..67476f085e48 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -300,8 +300,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
}
/* Extract info for Tcp socket info provided via netlink. */
-static void tcp_illinois_info(struct sock *sk, u32 ext,
- struct sk_buff *skb)
+static int tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
const struct illinois *ca = inet_csk_ca(sk);
@@ -318,8 +317,9 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
do_div(t, info.tcpv_rttcnt);
info.tcpv_rtt = t;
}
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
+ return 0;
}
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6afde666ab1..c71a1b8f7bde 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -286,7 +286,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
/* Extract info for Tcp socket info provided via netlink. */
-void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
const struct vegas *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
@@ -297,8 +297,9 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
.tcpv_minrtt = ca->minRTT,
};
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
+ return 0;
}
EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 0531b99d8637..e8a6b33cc61d 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -19,6 +19,6 @@ void tcp_vegas_init(struct sock *sk);
void tcp_vegas_state(struct sock *sk, u8 ca_state);
void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
#endif /* __TCP_VEGAS_H */
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index bb63fba47d47..b3c57cceb990 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -256,8 +256,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
}
/* Extract info for Tcp socket info provided via netlink. */
-static void tcp_westwood_info(struct sock *sk, u32 ext,
- struct sk_buff *skb)
+static int tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
const struct westwood *ca = inet_csk_ca(sk);
@@ -268,8 +267,9 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
};
- nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
}
+ return 0;
}
static struct tcp_congestion_ops tcp_westwood __read_mostly = {
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 4d2cede17468..dc6a2d324bd8 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -38,6 +38,9 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
struct tcf_bpf *prog = act->priv;
int action, filter_res;
+ if (unlikely(!skb_mac_header_was_set(skb)))
+ return TC_ACT_UNSPEC;
+
spin_lock(&prog->tcf_lock);
prog->tcf_tm.lastuse = jiffies;
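act_bpf here and cls_bpf below gain the same guard because the
filter.c helpers above now interpret offsets relative to the mac
header: if no mac header was ever set on the skb, the program must not
run. The check is cheap; as a sketch (paraphrased from the skb API,
not part of this patch), it compares the stored header offset against
the "unset" sentinel:

	static inline int skb_mac_header_was_set(const struct sk_buff *skb)
	{
		return skb->mac_header != (typeof(skb->mac_header))~0U;
	}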
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 5953517ec059..3f63ceac8e01 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -157,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
- skb_push(skb2, skb2->dev->hard_header_len);
+ skb_push(skb2, skb->mac_len);
}
/* mirror is always swallowed */
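skb->mac_len records how many bytes were actually pulled from this
packet, while dev->hard_header_len is only the device's nominal header
size, so pushing mac_len restores exactly the header this skb carried.
A hedged example of where the two can differ (assuming an in-band
802.1Q tag):

	/* frame received with an in-band VLAN tag (assumed example):
	 *   skb->mac_len               == 18  (what was pulled)
	 *   skb2->dev->hard_header_len == 14  (Ethernet constant)
	 */
	skb_push(skb2, skb->mac_len);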
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5c4171c5d2bd..91bd9c19471d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -66,6 +66,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_bpf_prog *prog;
int ret = -1;
+ if (unlikely(!skb_mac_header_was_set(skb)))
+ return -1;
+
/* Needed here for accessing maps. */
rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {