Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c         6
-rw-r--r--  net/core/neighbour.c  36
-rw-r--r--  net/core/netpoll.c    18
-rw-r--r--  net/core/pktgen.c     10
-rw-r--r--  net/core/rtnetlink.c  27
-rw-r--r--  net/core/skbuff.c      4
-rw-r--r--  net/core/sock.c        6
7 files changed, 76 insertions(+), 31 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 908f07c3bd7d..460e7f99ce3e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2900,7 +2900,7 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
}
}
- da = kmalloc(sizeof(*da), GFP_ATOMIC);
+ da = kzalloc(sizeof(*da), GFP_ATOMIC);
if (da == NULL)
return -ENOMEM;
memcpy(da->da_addr, addr, alen);
@@ -3329,7 +3329,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EOPNOTSUPP;
case SIOCADDMULTI:
- if (!dev->set_multicast_list ||
+ if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
@@ -3338,7 +3338,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
dev->addr_len, 1);
case SIOCDELMULTI:
- if (!dev->set_multicast_list ||
+ if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
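Note: kzalloc() returns zeroed memory, so any field of the new dev_addr_list entry that __dev_addr_add() does not assign explicitly starts at zero instead of stale heap contents. The SIOCADDMULTI/SIOCDELMULTI hunks widen the capability test so drivers that provide set_rx_mode rather than the older set_multicast_list hook are no longer rejected with -EINVAL. A minimal userspace model of the allocator difference, with calloc standing in for kzalloc (names hypothetical):

    #include <stdlib.h>

    struct addr_entry {
        unsigned char addr[32];
        int users;                     /* must start at 0 */
    };

    /* kmalloc-style: uninitialized; any field the code forgets to set
     * holds whatever was in the heap before. */
    static struct addr_entry *alloc_unzeroed(void)
    {
        return malloc(sizeof(struct addr_entry));
    }

    /* kzalloc-style: the whole object arrives zero-filled. */
    static struct addr_entry *alloc_zeroed(void)
    {
        return calloc(1, sizeof(struct addr_entry));
    }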
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a16cf1ec5e5e..19b8e003f150 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -358,11 +358,12 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
{
struct neighbour *n;
int key_len = tbl->key_len;
- u32 hash_val = tbl->hash(pkey, dev);
+ u32 hash_val;
NEIGH_CACHE_STAT_INC(tbl, lookups);
read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, dev);
for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
neigh_hold(n);
@@ -379,11 +380,12 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
{
struct neighbour *n;
int key_len = tbl->key_len;
- u32 hash_val = tbl->hash(pkey, NULL);
+ u32 hash_val;
NEIGH_CACHE_STAT_INC(tbl, lookups);
read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, NULL);
for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
if (!memcmp(n->primary_key, pkey, key_len) &&
(net == n->dev->nd_net)) {
@@ -464,6 +466,28 @@ out_neigh_release:
goto out;
}
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
+ struct net *net, const void *pkey, struct net_device *dev)
+{
+ struct pneigh_entry *n;
+ int key_len = tbl->key_len;
+ u32 hash_val = *(u32 *)(pkey + key_len - 4);
+
+ hash_val ^= (hash_val >> 16);
+ hash_val ^= hash_val >> 8;
+ hash_val ^= hash_val >> 4;
+ hash_val &= PNEIGH_HASHMASK;
+
+ for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
+ if (!memcmp(n->key, pkey, key_len) &&
+ (n->net == net) &&
+ (n->dev == dev || !n->dev))
+ break;
+ }
+
+ return n;
+}
+
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
struct net *net, const void *pkey,
struct net_device *dev, int creat)
@@ -507,6 +531,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (tbl->pconstructor && tbl->pconstructor(n)) {
if (dev)
dev_put(dev);
+ release_net(net);
kfree(n);
n = NULL;
goto out;
@@ -836,7 +861,7 @@ static void neigh_timer_handler(unsigned long arg)
struct sk_buff *skb = skb_peek(&neigh->arp_queue);
/* keep skb alive even if arp_queue overflows */
if (skb)
- skb_get(skb);
+ skb = skb_copy(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
@@ -1386,10 +1411,10 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
panic("cannot create neighbour cache statistics");
#ifdef CONFIG_PROC_FS
- tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
+ tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat,
+ &neigh_stat_seq_fops);
if (!tbl->pde)
panic("cannot create neighbour proc dir entry");
- tbl->pde->proc_fops = &neigh_stat_seq_fops;
tbl->pde->data = tbl;
#endif
@@ -2800,6 +2825,7 @@ EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);
+EXPORT_SYMBOL_GPL(__pneigh_lookup);
#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
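Note: several independent fixes land in neighbour.c. The lookup hunks compute hash_val only after read_lock_bh(&tbl->lock) is held, because the hash table (and with it hash_mask and the bucket array) can be resized under that lock, so an index computed before locking can point at a stale bucket. __pneigh_lookup adds a variant of pneigh_lookup that never creates entries and does no locking of its own, the pconstructor failure path now also drops the netns reference via release_net(), the timer handler solicits on a private skb_copy() rather than a second reference to the queued skb (so the solicit stays safe after the lock is dropped even if arp_queue is purged concurrently), and proc_create() replaces the two-step create_proc_entry()/proc_fops assignment (see the pktgen note below for that race). A userspace sketch of the compute-inside-the-lock ordering, using a pthreads rwlock (structure and names are illustrative, not the kernel's):

    #include <pthread.h>

    struct entry { struct entry *next; unsigned int key; };

    struct table {
        pthread_rwlock_t lock;
        struct entry   **buckets;
        unsigned int     hash_mask;   /* changes when the table is resized */
    };

    static struct entry *lookup(struct table *t, unsigned int key)
    {
        struct entry *n;

        pthread_rwlock_rdlock(&t->lock);
        /* Bucket index computed only now, as in the patch: hash_mask and
         * the bucket array are stable while the lock is held.  (The kernel
         * additionally takes a refcount, neigh_hold(), before unlocking.) */
        for (n = t->buckets[key & t->hash_mask]; n; n = n->next)
            if (n->key == key)
                break;
        pthread_rwlock_unlock(&t->lock);
        return n;
    }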
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6faa128a4c8e..c635de52526c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
static atomic_t trapped;
#define USEC_PER_POLL 50
+#define NETPOLL_RX_ENABLED 1
+#define NETPOLL_RX_DROP 2
#define MAX_SKB_SIZE \
(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
if (!test_bit(NAPI_STATE_SCHED, &napi->state))
return budget;
+ npinfo->rx_flags |= NETPOLL_RX_DROP;
atomic_inc(&trapped);
work = napi->poll(napi, budget);
atomic_dec(&trapped);
+ npinfo->rx_flags &= ~NETPOLL_RX_DROP;
return budget - work;
}
@@ -211,10 +215,12 @@ static void zap_completion_queue(void)
while (clist != NULL) {
struct sk_buff *skb = clist;
clist = clist->next;
- if (skb->destructor)
+ if (skb->destructor) {
+ atomic_inc(&skb->users);
dev_kfree_skb_any(skb); /* put this one back */
- else
+ } else {
__kfree_skb(skb);
+ }
}
}
@@ -472,7 +478,7 @@ int __netpoll_rx(struct sk_buff *skb)
if (skb->dev->type != ARPHRD_ETHER)
goto out;
- /* if receive ARP during middle of NAPI poll, then queue */
+ /* check if netpoll clients need ARP */
if (skb->protocol == htons(ETH_P_ARP) &&
atomic_read(&trapped)) {
skb_queue_tail(&npi->arp_tx, skb);
@@ -534,9 +540,6 @@ int __netpoll_rx(struct sk_buff *skb)
return 1;
out:
- /* If packet received while already in poll then just
- * silently drop.
- */
if (atomic_read(&trapped)) {
kfree_skb(skb);
return 1;
@@ -675,6 +678,7 @@ int netpoll_setup(struct netpoll *np)
goto release;
}
+ npinfo->rx_flags = 0;
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->rx_lock);
@@ -756,6 +760,7 @@ int netpoll_setup(struct netpoll *np)
if (np->rx_hook) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
+ npinfo->rx_flags |= NETPOLL_RX_ENABLED;
npinfo->rx_np = np;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
@@ -797,6 +802,7 @@ void netpoll_cleanup(struct netpoll *np)
if (npinfo->rx_np == np) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_np = NULL;
+ npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
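Note: rx_flags tracks whether a netpoll receive hook is installed (NETPOLL_RX_ENABLED) and whether packets arriving while netpoll itself is polling should be dropped (NETPOLL_RX_DROP, set around napi->poll()). The zap_completion_queue() hunk fixes a refcount underflow: skbs on the completion queue have already dropped their last user reference, so handing one to dev_kfree_skb_any(), which decrements skb->users again, would underflow the counter; bumping users first makes the decrement a clean 1 -> 0 free that still runs the destructor. A self-contained model of that inc-before-free pattern (names illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct buf {
        atomic_int users;                    /* mirrors skb->users */
        void (*destructor)(struct buf *);
        struct buf *next;
    };

    static void buf_free(struct buf *b)      /* mirrors kfree_skb(): drop a ref */
    {
        if (atomic_fetch_sub(&b->users, 1) == 1) {
            if (b->destructor)
                b->destructor(b);
            free(b);
        }
    }

    /* Buffers on a completion queue already sit at zero users; re-inc
     * before handing them to the ref-dropping free path, exactly as the
     * patched zap_completion_queue() does for skbs with a destructor. */
    static void zap_queue(struct buf *list)
    {
        while (list) {
            struct buf *b = list;
            list = list->next;
            atomic_fetch_add(&b->users, 1);
            buf_free(b);
        }
    }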
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index bfcdfaebca5c..20e63b302ba6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3570,14 +3570,14 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
if (err)
goto out1;
- pkt_dev->entry = create_proc_entry(ifname, 0600, pg_proc_dir);
+ pkt_dev->entry = proc_create(ifname, 0600,
+ pg_proc_dir, &pktgen_if_fops);
if (!pkt_dev->entry) {
printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
PG_PROC_DIR, ifname);
err = -EINVAL;
goto out2;
}
- pkt_dev->entry->proc_fops = &pktgen_if_fops;
pkt_dev->entry->data = pkt_dev;
#ifdef CONFIG_XFRM
pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
@@ -3628,7 +3628,7 @@ static int __init pktgen_create_thread(int cpu)
kthread_bind(p, cpu);
t->tsk = p;
- pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir);
+ pe = proc_create(t->tsk->comm, 0600, pg_proc_dir, &pktgen_thread_fops);
if (!pe) {
printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
PG_PROC_DIR, t->tsk->comm);
@@ -3638,7 +3638,6 @@ static int __init pktgen_create_thread(int cpu)
return -EINVAL;
}
- pe->proc_fops = &pktgen_thread_fops;
pe->data = t;
wake_up_process(p);
@@ -3709,7 +3708,7 @@ static int __init pg_init(void)
return -ENODEV;
pg_proc_dir->owner = THIS_MODULE;
- pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
+ pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
if (pe == NULL) {
printk(KERN_ERR "pktgen: ERROR: cannot create %s "
"procfs entry.\n", PGCTRL);
@@ -3717,7 +3716,6 @@ static int __init pg_init(void)
return -EINVAL;
}
- pe->proc_fops = &pktgen_fops;
pe->data = NULL;
/* Register us to receive netdevice events */
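Note: every create_proc_entry()-then-assign-proc_fops pair in pktgen (and in neighbour.c above) is collapsed into a single proc_create() call. With the old two-step sequence the /proc file is already visible to userspace before proc_fops is filled in, so an open() racing with module init can hit an entry with no operations; proc_create() publishes the entry with its fops already in place. A compilable model of the two publication orders (publish = making the entry reachable; all names here are illustrative):

    struct fops  { int (*open)(void); };
    struct entry { const struct fops *f; };

    /* Readers find the entry as soon as this pointer is non-NULL. */
    static struct entry *registry;

    /* create_proc_entry() shape: published first, fops filled in later,
     * leaving a window where a reader sees f == NULL. */
    static void register_then_fill(struct entry *e, const struct fops *f)
    {
        registry = e;        /* visible */
        e->f = f;            /* too late for a racing open() */
    }

    /* proc_create() shape: fully initialized before publication. */
    static void fill_then_register(struct entry *e, const struct fops *f)
    {
        e->f = f;
        registry = e;        /* visible, and complete */
    }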
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 61ac8d06292c..2bd9c5f7627d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -689,10 +689,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
[IFLA_MTU] = { .type = NLA_U32 },
+ [IFLA_LINK] = { .type = NLA_U32 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
[IFLA_LINKMODE] = { .type = NLA_U8 },
+ [IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
};
@@ -720,6 +722,21 @@ static struct net *get_net_ns_by_pid(pid_t pid)
return net;
}
+static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+{
+ if (dev) {
+ if (tb[IFLA_ADDRESS] &&
+ nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
+ return -EINVAL;
+
+ if (tb[IFLA_BROADCAST] &&
+ nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
@@ -892,12 +909,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
goto errout;
}
- if (tb[IFLA_ADDRESS] &&
- nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
- goto errout_dev;
-
- if (tb[IFLA_BROADCAST] &&
- nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
+ if ((err = validate_linkmsg(dev, tb)) < 0)
goto errout_dev;
err = do_setlink(dev, ifm, tb, ifname, 0);
@@ -1018,6 +1030,9 @@ replay:
else
dev = NULL;
+ if ((err = validate_linkmsg(dev, tb)) < 0)
+ return err;
+
if (tb[IFLA_LINKINFO]) {
err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
tb[IFLA_LINKINFO], ifla_info_policy);
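Note: the duplicated IFLA_ADDRESS/IFLA_BROADCAST length checks from rtnl_setlink() move into validate_linkmsg(), which the newlink path (the replay: hunk) now calls as well, so malformed hardware addresses are rejected on link creation too, not only on modification. The policy table additionally gains entries for IFLA_LINK and IFLA_LINKINFO. The check itself is small; a standalone restatement with simplified types (struct attr stands in for nlattr):

    struct attr { const void *data; int len; };

    /* Any supplied link-layer address attribute must be at least
     * dev->addr_len bytes, otherwise the whole request is refused. */
    static int validate_linkmsg_model(int addr_len,
                                      const struct attr *address,
                                      const struct attr *broadcast)
    {
        if (address && address->len < addr_len)
            return -1;                 /* -EINVAL in the kernel */
        if (broadcast && broadcast->len < addr_len)
            return -1;
        return 0;
    }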
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0d0fd28a9041..608701339620 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2131,8 +2131,8 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
* @features: features for the output path (see dev->features)
*
* This function performs segmentation on the given skb. It returns
- * the segment at the given position. It returns NULL if there are
- * no more segments to generate, or when an error is encountered.
+ * a pointer to the first in a list of new skbs for the segments.
+ * In case of error it returns ERR_PTR(err).
*/
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
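Note: the skbuff.c hunk is documentation-only, correcting skb_segment()'s comment to match its actual contract: success yields the head of a list of new segment skbs, and failure yields ERR_PTR(err) rather than NULL, so callers must test with IS_ERR(), not a NULL check. A userspace restatement of that pointer-encoded-error convention:

    #include <stdint.h>

    #define MAX_ERRNO 4095   /* errno values occupy the top page of the
                              * address space, which no valid pointer uses */

    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* Caller pattern for skb_segment() under the corrected comment:
     *
     *     segs = skb_segment(skb, features);
     *     if (IS_ERR(segs))
     *         return PTR_ERR(segs);
     */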
diff --git a/net/core/sock.c b/net/core/sock.c
index 09cb3a74de7f..7a0567b4b2c9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1621,7 +1621,7 @@ static void sock_def_readable(struct sock *sk, int len)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible_sync(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
read_unlock(&sk->sk_callback_lock);
}
@@ -1635,7 +1635,7 @@ static void sock_def_write_space(struct sock *sk)
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible_sync(sk->sk_sleep);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
@@ -1725,7 +1725,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
- sk->sk_stamp = ktime_set(-1L, -1L);
+ sk->sk_stamp = ktime_set(-1L, 0);
atomic_set(&sk->sk_refcnt, 1);
atomic_set(&sk->sk_drops, 0);
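Note: both wakeup sites switch to wake_up_interruptible_sync(), the synchronous variant that passes a scheduler hint that the waker is about to finish, so the woken task need not immediately preempt it on another CPU. The sk_stamp change fixes the "no timestamp yet" sentinel: ktime_set() takes seconds plus a nanosecond count that must lie in [0, NSEC_PER_SEC), so -1 nanoseconds was out of range; { -1 s, 0 ns } is the well-formed sentinel. A tiny model of that constraint (simplified, not the kernel's ktime_t):

    #include <assert.h>

    #define NSEC_PER_SEC 1000000000L

    struct ktime_model { long sec; long nsec; };

    /* Simplified ktime_set(): the nanosecond field must already be a
     * valid sub-second count.  ktime_set(-1L, -1L) violated that;
     * ktime_set(-1L, 0) does not. */
    static struct ktime_model ktime_set_model(long sec, long nsec)
    {
        assert(nsec >= 0 && nsec < NSEC_PER_SEC);
        return (struct ktime_model){ sec, nsec };
    }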