Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r-- | drivers/net/xen-netfront.c | 129 |
1 file changed, 51 insertions(+), 78 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index bf2744e1e3db..6ffc482550c1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,7 @@
 #include <xen/interface/grant_table.h>
 
 /* Module parameters */
+#define MAX_QUEUES_DEFAULT 8
 static unsigned int xennet_max_queues;
 module_param_named(max_queues, xennet_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
@@ -281,6 +282,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
 	RING_IDX req_prod = queue->rx.req_prod_pvt;
 	int notify;
+	int err = 0;
 
 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 		return;
@@ -295,8 +297,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		struct xen_netif_rx_request *req;
 
 		skb = xennet_alloc_one_rx_buffer(queue);
-		if (!skb)
+		if (!skb) {
+			err = -ENOMEM;
 			break;
+		}
 
 		id = xennet_rxidx(req_prod);
 
@@ -320,8 +324,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 
 	queue->rx.req_prod_pvt = req_prod;
 
-	/* Not enough requests? Try again later. */
-	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+	/* Try again later if there are not enough requests or skb allocation
+	 * failed.
+	 * Enough requests is quantified as the sum of newly created slots and
+	 * the unconsumed slots at the backend.
+	 */
+	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+	    unlikely(err)) {
 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 		return;
 	}
@@ -1051,7 +1060,7 @@ err:
 	if (work_done < budget) {
 		int more_to_do = 0;
 
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 
 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
 		if (more_to_do)
@@ -1073,8 +1082,8 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
 	return 0;
 }
 
-static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
-						    struct rtnl_link_stats64 *tot)
+static void xennet_get_stats64(struct net_device *dev,
+			       struct rtnl_link_stats64 *tot)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	int cpu;
@@ -1105,8 +1114,6 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 
 	tot->rx_errors  = dev->stats.rx_errors;
 	tot->tx_dropped = dev->stats.tx_dropped;
-
-	return tot;
 }
 
 static void xennet_release_tx_bufs(struct netfront_queue *queue)
@@ -1169,43 +1176,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	int val;
-
-	if (features & NETIF_F_SG) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-				 "%d", &val) < 0)
-			val = 0;
-
-		if (!val)
-			features &= ~NETIF_F_SG;
-	}
-
-	if (features & NETIF_F_IPV6_CSUM) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-ipv6-csum-offload", "%d", &val) < 0)
-			val = 0;
-
-		if (!val)
-			features &= ~NETIF_F_IPV6_CSUM;
-	}
 
-	if (features & NETIF_F_TSO) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv4", "%d", &val) < 0)
-			val = 0;
+	if (features & NETIF_F_SG &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
+		features &= ~NETIF_F_SG;
 
-		if (!val)
-			features &= ~NETIF_F_TSO;
-	}
+	if (features & NETIF_F_IPV6_CSUM &&
+	    !xenbus_read_unsigned(np->xbdev->otherend,
+				  "feature-ipv6-csum-offload", 0))
+		features &= ~NETIF_F_IPV6_CSUM;
 
-	if (features & NETIF_F_TSO6) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv6", "%d", &val) < 0)
-			val = 0;
+	if (features & NETIF_F_TSO &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
+		features &= ~NETIF_F_TSO;
 
-		if (!val)
-			features &= ~NETIF_F_TSO6;
-	}
+	if (features & NETIF_F_TSO6 &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
+		features &= ~NETIF_F_TSO6;
 
 	return features;
 }
@@ -1329,6 +1316,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netdev->features |= netdev->hw_features;
 
 	netdev->ethtool_ops = &xennet_ethtool_ops;
+	netdev->min_mtu = 0;
+	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
 	SET_NETDEV_DEV(netdev, &dev->dev);
 
 	np->netdev = netdev;
@@ -1397,6 +1386,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 	for (i = 0; i < num_queues && info->queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
+		del_timer_sync(&queue->rx_refill_timer);
+
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
 			unbind_from_irqhandler(queue->tx_irq, queue);
 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1751,7 +1742,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
 		if (netif_running(info->netdev))
 			napi_disable(&queue->napi);
-		del_timer_sync(&queue->rx_refill_timer);
 		netif_napi_del(&queue->napi);
 	}
 
@@ -1821,18 +1811,13 @@ static int talk_to_netback(struct xenbus_device *dev,
 	info->netdev->irq = 0;
 
 	/* Check if backend supports multiple queues */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "multi-queue-max-queues", "%u", &max_queues);
-	if (err < 0)
-		max_queues = 1;
+	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+					  "multi-queue-max-queues", 1);
 	num_queues = min(max_queues, xennet_max_queues);
 
 	/* Check feature-split-event-channels */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-split-event-channels", "%u",
-			   &feature_split_evtchn);
-	if (err < 0)
-		feature_split_evtchn = 0;
+	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
+					"feature-split-event-channels", 0);
 
 	/* Read mac addr. */
 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
@@ -1845,27 +1830,19 @@ static int talk_to_netback(struct xenbus_device *dev,
 		xennet_destroy_queues(info);
 
 	err = xennet_create_queues(info, &num_queues);
-	if (err < 0)
-		goto destroy_ring;
+	if (err < 0) {
+		xenbus_dev_fatal(dev, err, "creating queues");
+		kfree(info->queues);
+		info->queues = NULL;
+		goto out;
+	}
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
 		err = setup_netfront(dev, queue, feature_split_evtchn);
-		if (err) {
-			/* setup_netfront() will tidy up the current
-			 * queue on error, but we need to clean up
-			 * those already allocated.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
+		if (err)
+			goto destroy_ring;
 	}
 
 again:
@@ -1955,9 +1932,10 @@ abort_transaction_no_dev_fatal:
 	xenbus_transaction_end(xbt, 1);
 destroy_ring:
 	xennet_disconnect_backend(info);
-	kfree(info->queues);
-	info->queues = NULL;
+	xennet_destroy_queues(info);
 out:
+	unregister_netdev(info->netdev);
+	xennet_free_netdev(info->netdev);
 	return err;
 }
 
@@ -1966,16 +1944,10 @@ static int xennet_connect(struct net_device *dev)
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
 	int err;
-	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
 
-	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy",
-			   "%u", &feature_rx_copy);
-	if (err != 1)
-		feature_rx_copy = 0;
-
-	if (!feature_rx_copy) {
+	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
 		dev_info(&dev->dev,
 			 "backend does not support copying receive path\n");
 		return -ENODEV;
@@ -2193,11 +2165,12 @@ static int __init netif_init(void)
 
 	pr_info("Initialising Xen virtual ethernet driver\n");
 
-	/* Allow as many queues as there are CPUs if user has not
+	/* Allow as many queues as there are CPUs but max. 8 if user has not
 	 * specified a value.
 	 */
 	if (xennet_max_queues == 0)
-		xennet_max_queues = num_online_cpus();
+		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
+					  num_online_cpus());
 
 	return xenbus_register_frontend(&netfront_driver);
 }