Diffstat (limited to 'net/core')
 net/core/gen_estimator.c | 13 +++++++------
 net/core/netpoll.c       |  8 ++++++--
 net/core/pktgen.c        |  2 +-
 net/core/skbuff.c        |  2 +-
 4 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109e..6d62d4618cfc 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * Both values are reported as 32-bit unsigned values. bps can
+     overflow for fast links: max speed being 34360 Mbit/sec.
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
 	spinlock_t	*stats_lock;
 	int		ewma_log;
 	u64		last_bytes;
+	u64		avbps;
 	u32		last_packets;
 	u32		avpps;
-	u32		avbps;
 	struct rcu_head	e_rcu;
 	struct rb_node	node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
+		u64 brate;
 		u32 npackets;
 		u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 		spin_lock(e->stats_lock);
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
-		rate = (nbytes - e->last_bytes)<<(7 - idx);
+		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
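
The arithmetic behind the wider accumulator can be checked in isolation.
The sketch below (plain userspace C, illustrative names only, not the
kernel code itself) replays est_timer()'s EWMA update and the two bounds
the new comment cites: a u32 accumulator scaled by 2^5 saturates near
(2^32-1)>>5 bytes/sec, roughly 1 Gbit/sec, while the still-32-bit
rate_est->bps readout caps at 2^32 bytes/sec, about 34360 Mbit/sec.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t avbps = 0;		/* average rate, scaled by 2^5 */
	unsigned int ewma_log = 3;	/* smoothing weight 1/2^3 */
	/* one measured sample: 5e9 bytes/sec, pre-scaled by 2^5 */
	uint64_t brate = 5000000000ULL << 5;
	int i;

	/* same update as the patched est_timer(): signed difference,
	 * arithmetic shift; converges toward brate */
	for (i = 0; i < 64; i++)
		avbps += (int64_t)(brate - avbps) >> ewma_log;

	printf("estimated bps: %llu bytes/sec\n",
	       (unsigned long long)((avbps + 0xF) >> 5));
	return 0;
}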
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bdff612..64f51eec6576 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
 	struct net_device *dev = np->dev;
-	const struct net_device_ops *ops = dev->netdev_ops;
+	const struct net_device_ops *ops;
+
+	if (!dev || !netif_running(dev))
+		return;
 
-	if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+	ops = dev->netdev_ops;
+	if (!ops->ndo_poll_controller)
 		return;
 
 	/* Process pending work on NIC */
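
The netpoll change is an ordering fix: the old code loaded dev->netdev_ops
in the declaration's initializer, before the !dev test ever ran, so a NULL
np->dev faulted despite the guard. A minimal userspace sketch of the hazard
and the fix (made-up types, not the kernel's):

#include <stdio.h>

struct dev_ops { void (*poll_controller)(void); };
struct device  { int running; const struct dev_ops *netdev_ops; };

static void poll_device(struct device *dev)
{
	const struct dev_ops *ops;

	/* The old code effectively did 'ops = dev->netdev_ops' here, at
	 * the declaration -- that load faults for dev == NULL even though
	 * the combined test below checked dev first. */
	if (!dev || !dev->running)
		return;

	ops = dev->netdev_ops;		/* safe: dev is known non-NULL */
	if (!ops->poll_controller)
		return;

	ops->poll_controller();
}

int main(void)
{
	poll_device(NULL);		/* must return quietly, not crash */
	puts("survived NULL device");
	return 0;
}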
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3779c1438c11..0666a827bc62 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->cflows) {
 		/* let go of the SAs if we have them */
 		int i = 0;
-		for (; i < pkt_dev->nflows; i++){
+		for (; i < pkt_dev->cflows; i++) {
 			struct xfrm_state *x = pkt_dev->flows[i].x;
 			if (x) {
 				xfrm_state_put(x);
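
The pktgen fix closes a reference leak: SAs can sit in any of the cflows
configured flow slots, while nflows is a runtime counter that can be
smaller than that, so a teardown loop bounded by nflows leaves the
xfrm_state references in the remaining slots undropped. A small sketch of
the invariant, with stand-in types rather than pktgen's own:

struct flow_state { int refcnt; };
struct flow { struct flow_state *x; };

static void state_put(struct flow_state *x)
{
	x->refcnt--;			/* stand-in for xfrm_state_put() */
}

static void free_all_sas(struct flow *flows, int cflows)
{
	int i;

	for (i = 0; i < cflows; i++) {	/* whole array, not just nflows */
		if (flows[i].x) {
			state_put(flows[i].x);
			flows[i].x = NULL;
		}
	}
}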
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d152394b2611..e505b5392e1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2288,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
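
skb_seq_read() returns one block per call -- linear head bytes or a
kmapped page fragment -- and parks its mapping state in skb_seq_state
between calls. Once the reader has stepped into fragment data,
st->stepped_offset already covers the linear head, so the head-block
shortcut could fire for offsets that really belong to a fragment,
returning the wrong bytes while the previous call's mapping stayed live;
the added !st->frag_data test routes such reads back through the fragment
path. A typical consumer loop for this API (sketch only; walk_skb is a
made-up name, error handling elided):

static void walk_skb(struct sk_buff *skb, unsigned int len)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, avail;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, len, &st);
	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process 'avail' bytes starting at 'data' */
		consumed += avail;
	}
	skb_abort_seq_read(&st);	/* drop any still-mapped fragment */
}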