author     Stephen Rothwell <sfr@canb.auug.org.au>    2016-12-09 15:54:23 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>    2016-12-09 15:54:23 +1100
commit     a748323bc1985e3752583e15688df22e5d14d26f
tree       6deaf3e2055376b93651d5a1a4de6e2c8d12271b /drivers
parent     85088573ef94f968d221843ba09cb41639634172
parent     51a0e856fadbf52e0fee1ed1d241f74c710f58c5
Merge branch 'akpm-current/current'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h       |  7
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  | 77
-rw-r--r--  drivers/of/fdt.c                           | 19
-rw-r--r--  drivers/pcmcia/m32r_pcc.c                  | 41
-rw-r--r--  drivers/sh/intc/virq.c                     |  2
5 files changed, 104 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d11093dce1b9..acbc3abe2ddd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
struct igb_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct igb_tx_queue_stats {
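
The new pagecnt_bias field records how many references on the RX page the driver itself still holds, so the hot path can spend references by decrementing a local counter instead of touching the page's atomic refcount; page_offset drops to 16 bits where that is safe (PAGE_SIZE below 64 KiB on 32-bit builds), keeping the structure compact. A minimal illustration of the ownership test this enables follows; the function name is not the driver's.

/* Illustrative only: a page may be recycled when every live reference is one
 * the driver accounts for locally, i.e. the real refcount equals the bias.
 */
#include <linux/mm.h>

static bool sketch_page_is_ours(struct page *page, u16 pagecnt_bias)
{
	return page_ref_count(page) == pagecnt_bias;
}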
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cae24a8ccf47..a761001308dc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3947,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
if (!buffer_info->page)
continue;
- dma_unmap_page(rx_ring->dev,
- buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(buffer_info->page);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ buffer_info->dma,
+ buffer_info->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_drain(buffer_info->page, 0,
+ buffer_info->pagecnt_bias);
buffer_info->page = NULL;
}
@@ -6812,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
/* transfer page from old buffer to new buffer */
*new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
- old_buff->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
}
static inline bool igb_page_is_reserved(struct page *page)
@@ -6829,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
struct page *page,
unsigned int truesize)
{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
/* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page)))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
return false;
/* flip page offset to other buffer */
@@ -6848,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
return false;
#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- page_ref_inc(page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
@@ -6903,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- __free_page(page);
return false;
}
@@ -6938,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -6962,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
@@ -7234,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -7249,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
return true;
}
@@ -7275,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
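
Taken together, the igb_main.c hunks move the driver to explicit, range-limited DMA syncs: the page is mapped once with DMA_ATTR_SKIP_CPU_SYNC, synced for the device just before it is handed to hardware, synced back for the CPU only over the bytes a received frame actually used, and unmapped with the same attribute so teardown does not re-sync memory. A condensed sketch of that lifecycle follows; the names and buffer length are placeholders, only the call sequence mirrors the diff.

/* Sketch of the map/sync/unmap pattern used above; "dev", "buf" and
 * SKETCH_RX_LEN are placeholders.
 */
#include <linux/dma-mapping.h>

#define SKETCH_RX_LEN 2048	/* stand-in for the driver's RX buffer size */

struct sketch_rx_buf {
	struct page *page;
	dma_addr_t dma;
	unsigned int offset;
};

static int sketch_map(struct device *dev, struct sketch_rx_buf *buf)
{
	/* map once, telling the DMA API not to sync the whole page for us */
	buf->dma = dma_map_page_attrs(dev, buf->page, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, buf->dma))
		return -ENOMEM;

	/* hand the buffer to the device: make its view coherent first */
	dma_sync_single_range_for_device(dev, buf->dma, buf->offset,
					 SKETCH_RX_LEN, DMA_FROM_DEVICE);
	return 0;
}

static void sketch_rx(struct device *dev, struct sketch_rx_buf *buf,
		      unsigned int len)
{
	/* CPU is about to read what the device wrote: sync only that range */
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->offset, len,
				      DMA_FROM_DEVICE);
}

static void sketch_unmap(struct device *dev, struct sketch_rx_buf *buf)
{
	/* skip the CPU sync on teardown too; live ranges were synced above */
	dma_unmap_page_attrs(dev, buf->dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}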
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c89d5d231a0e..c9b5cac03b36 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1015,6 +1015,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const __be32 *reg, *endp;
int l;
+ bool hotpluggable;
/* We are scanning "memory" nodes only */
if (type == NULL) {
@@ -1034,6 +1035,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
return 0;
endp = reg + (l / sizeof(__be32));
+ hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
pr_debug("memory scan node %s, reg size %d,\n", uname, l);
@@ -1049,6 +1051,13 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
(unsigned long long)size);
early_init_dt_add_memory_arch(base, size);
+
+ if (!hotpluggable)
+ continue;
+
+ if (early_init_dt_mark_hotplug_memory_arch(base, size))
+ pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
+ base, base + size);
}
return 0;
@@ -1146,6 +1155,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
memblock_add(base, size);
}
+int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+{
+ return memblock_mark_hotplug(base, size);
+}
+
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
@@ -1168,6 +1182,11 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
WARN_ON(1);
}
+int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+{
+ return -ENOSYS;
+}
+
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
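
The "hotpluggable" property handling above funnels through a new __weak hook with a memblock-based default, so an architecture can override how such ranges are recorded. A hypothetical override is sketched below; no real architecture is implied and the pr_info is illustrative only.

/* Hypothetical architecture override of the weak hook added above. */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>

int __init early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
{
	/* record the range before falling back to the generic memblock flag */
	pr_info("hotpluggable memory: 0x%llx - 0x%llx\n", base, base + size);
	return memblock_mark_hotplug(base, size);
}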
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index eb126b98ed8a..e50bbf826188 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -296,10 +296,11 @@ static int __init is_alive(u_short sock)
return 0;
}
-static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
- unsigned int ioaddr)
+static int add_pcc_socket(ulong base, int irq, ulong mapaddr,
+ unsigned int ioaddr)
{
pcc_socket_t *t = &socket[pcc_sockets];
+ int err;
/* add sockets */
t->ioaddr = ioaddr;
@@ -328,11 +329,16 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
t->socket.irq_mask = 0;
t->socket.pci_irq = 2 + pcc_sockets; /* XXX */
- request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt);
+ err = request_irq(irq, pcc_interrupt, 0, "m32r-pcc", pcc_interrupt);
+ if (err) {
+ if (t->base > 0)
+ release_region(t->base, 0x20);
+ return err;
+ }
pcc_sockets++;
- return;
+ return 0;
}
@@ -683,26 +689,29 @@ static int __init init_m32r_pcc(void)
return ret;
ret = platform_device_register(&pcc_device);
- if (ret){
- platform_driver_unregister(&pcc_driver);
- return ret;
- }
+ if (ret)
+ goto unreg_driv;
printk(KERN_INFO "m32r PCC probe:\n");
pcc_sockets = 0;
- add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE, 0x1000);
+ ret = add_pcc_socket(M32R_PCC0_BASE, PCC0_IRQ, M32R_PCC0_MAPBASE,
+ 0x1000);
+ if (ret)
+ goto unreg_dev;
#ifdef CONFIG_M32RPCC_SLOT2
- add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE, 0x2000);
+ ret = add_pcc_socket(M32R_PCC1_BASE, PCC1_IRQ, M32R_PCC1_MAPBASE,
+ 0x2000);
+ if (ret)
+ goto unreg_dev;
#endif
if (pcc_sockets == 0) {
printk("socket is not found.\n");
- platform_device_unregister(&pcc_device);
- platform_driver_unregister(&pcc_driver);
- return -ENODEV;
+ ret = -ENODEV;
+ goto unreg_dev;
}
/* Set up interrupt handler(s) */
@@ -728,6 +737,12 @@ static int __init init_m32r_pcc(void)
}
return 0;
+
+unreg_dev:
+ platform_device_unregister(&pcc_device);
+unreg_driv:
+ platform_driver_unregister(&pcc_driver);
+ return ret;
} /* init_m32r_pcc */
static void __exit exit_m32r_pcc(void)
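
The init-path rework above adopts the usual kernel unwind idiom: every failure after a successful registration jumps to a label that releases what has been acquired so far, in reverse order, so each resource is released exactly once. The generic shape is sketched below with placeholder foo_* names rather than m32r_pcc symbols.

/* Generic shape of goto-based error unwinding; all names are placeholders. */
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
};

static struct platform_device foo_device = {
	.name = "foo",
	.id   = -1,
};

static int foo_add_sockets(void)
{
	return 0;	/* stand-in for the per-socket setup that can fail */
}

static int __init foo_init(void)
{
	int ret;

	ret = platform_driver_register(&foo_driver);
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = platform_device_register(&foo_device);
	if (ret)
		goto unreg_driver;	/* undo the driver registration only */

	ret = foo_add_sockets();
	if (ret)
		goto unreg_device;	/* undo device, then driver */

	return 0;

unreg_device:
	platform_device_unregister(&foo_device);
unreg_driver:
	platform_driver_unregister(&foo_driver);
	return ret;
}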
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index e7899624aa0b..35bbe288ddb4 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -254,7 +254,7 @@ restart:
radix_tree_tag_clear(&d->tree, entry->enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
- radix_tree_replace_slot((void **)entries[i],
+ radix_tree_replace_slot(&d->tree, (void **)entries[i],
&intc_irq_xlate[irq]);
}