author	Stephen Rothwell <sfr@canb.auug.org.au>	2014-01-16 17:01:42 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2014-01-16 17:01:42 +1100
commit	ade76b133f476ac0307a212e77f8b53d160c53d3 (patch)
tree	e51b9ee1e111448e04f07063dd570b7f901ce308
parent	e8ed342f0f447dc91b4d4b6e16a190875582319b (diff)
parent	7f8c1e382d68dcc91e892ef427f155c0568fe975 (diff)
Merge branch 'akpm/master'
-rw-r--r--	Documentation/ABI/testing/sysfs-class-bdi | 8
-rw-r--r--	Documentation/blockdev/zram.txt (renamed from drivers/staging/zram/zram.txt) | 6
-rw-r--r--	MAINTAINERS | 16
-rw-r--r--	arch/arm/mm/init.c | 6
-rw-r--r--	arch/arm/mm/mm.h | 2
-rw-r--r--	block/blk-mq-cpu.c | 6
-rw-r--r--	drivers/block/Kconfig | 2
-rw-r--r--	drivers/block/Makefile | 1
-rw-r--r--	drivers/block/zram/Kconfig (renamed from drivers/staging/zram/Kconfig) | 1
-rw-r--r--	drivers/block/zram/Makefile (renamed from drivers/staging/zram/Makefile) | 0
-rw-r--r--	drivers/block/zram/zram_drv.c (renamed from drivers/staging/zram/zram_drv.c) | 128
-rw-r--r--	drivers/block/zram/zram_drv.h (renamed from drivers/staging/zram/zram_drv.h) | 32
-rw-r--r--	drivers/net/phy/mdio_bus.c | 1
-rw-r--r--	drivers/staging/Kconfig | 4
-rw-r--r--	drivers/staging/Makefile | 2
-rw-r--r--	drivers/staging/zsmalloc/Kconfig | 24
-rw-r--r--	drivers/staging/zsmalloc/Makefile | 3
-rw-r--r--	drivers/video/backlight/lcd.c | 2
-rw-r--r--	drivers/w1/w1_int.c | 5
-rw-r--r--	include/linux/blkdev.h | 5
-rw-r--r--	include/linux/interrupt.h | 2
-rw-r--r--	include/linux/smp.h | 6
-rw-r--r--	include/linux/zsmalloc.h (renamed from drivers/staging/zsmalloc/zsmalloc.h) | 1
-rw-r--r--	kernel/kexec.c | 2
-rw-r--r--	kernel/smp.c | 68
-rw-r--r--	kernel/softirq.c | 72
-rw-r--r--	mm/Kconfig | 25
-rw-r--r--	mm/Makefile | 1
-rw-r--r--	mm/backing-dev.c | 35
-rw-r--r--	mm/migrate.c | 2
-rw-r--r--	mm/zsmalloc.c (renamed from drivers/staging/zsmalloc/zsmalloc-main.c) | 4
-rwxr-xr-x	scripts/checkpatch.pl | 9
32 files changed, 219 insertions, 262 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-bdi b/Documentation/ABI/testing/sysfs-class-bdi
index d773d5697cf5..3187a18af6da 100644
--- a/Documentation/ABI/testing/sysfs-class-bdi
+++ b/Documentation/ABI/testing/sysfs-class-bdi
@@ -53,3 +53,11 @@ stable_pages_required (read-only)
If set, the backing device requires that all pages comprising a write
request must not be changed until writeout is complete.
+
+strictlimit (read-write)
+
+	Forces per-BDI checks on the share of the given device in the
+	write-back cache even before the global background dirty limit is
+	reached. This is useful in situations where the global limit is
+	much higher than a relatively slow (or untrusted) device can
+	afford. Turning strictlimit on has no visible effect if max_ratio
+	is equal to 100%.
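
A minimal user-space sketch of toggling the new attribute, for illustration only (the "8:0" BDI below is a placeholder major:minor, not something this patch prescribes; substitute the device's own):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* placeholder BDI directory; pick your device's major:minor */
            int fd = open("/sys/class/bdi/8:0/strictlimit", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* strictlimit_store() below accepts "0" (clear) or "1" (set) */
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }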
diff --git a/drivers/staging/zram/zram.txt b/Documentation/blockdev/zram.txt
index 765d790ae831..2eccddffa6c8 100644
--- a/drivers/staging/zram/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -1,8 +1,6 @@
zram: Compressed RAM based block devices
----------------------------------------
-Project home: http://compcache.googlecode.com/
-
* Introduction
The zram module creates RAM based block devices named /dev/zram<id>
@@ -69,9 +67,5 @@ Following shows a typical sequence of steps for using zram.
resets the disksize to zero. You must set the disksize again
before reusing the device.
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
Nitin Gupta
ngupta@vflare.org
diff --git a/MAINTAINERS b/MAINTAINERS
index 7798b077d7f9..070800cc4c86 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9782,11 +9782,27 @@ T: Mercurial http://linuxtv.org/hg/v4l-dvb
S: Odd Fixes
F: drivers/media/pci/zoran/
+ZRAM COMPRESSED RAM BLOCK DEVICE DRIVER
+M: Minchan Kim <minchan@kernel.org>
+M: Nitin Gupta <ngupta@vflare.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/block/zram/
+F: Documentation/blockdev/zram.txt
+
ZS DECSTATION Z85C30 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
F: drivers/tty/serial/zs.*
+ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
+M: Minchan Kim <minchan@kernel.org>
+M: Nitin Gupta <ngupta@vflare.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/zsmalloc.c
+F: include/linux/zsmalloc.h
+
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@linux.vnet.ibm.com>
L: linux-mm@kvack.org
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a271ffce12a..403ba90edcf3 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -153,7 +153,6 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
-phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
@@ -172,6 +171,8 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
+ phys_addr_t arm_dma_limit;
+
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
@@ -326,7 +327,8 @@ void __init arm_memblock_init(struct meminfo *mi,
* reserve memory for DMA contiguous allocations,
* must come from DMA area inside low memory
*/
- dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+ dma_contiguous_reserve(min((phys_addr_t)arm_dma_pfn_limit << PAGE_SHIFT,
+ arm_lowmem_limit));
arm_memblock_steal_permitted = false;
memblock_dump_all();
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d15a88..11f350997e54 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -80,10 +80,8 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#endif
#ifdef CONFIG_ZONE_DMA
-extern phys_addr_t arm_dma_limit;
extern unsigned long arm_dma_pfn_limit;
#else
-#define arm_dma_limit ((phys_addr_t)~0)
#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 20576e3476e9..3146befb56aa 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -28,10 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
- .notifier_call = blk_mq_main_cpu_notify,
-};
-
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
@@ -58,5 +54,5 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void __init blk_mq_cpu_init(void)
{
- register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
+ hotcpu_notifier(blk_mq_main_cpu_notify, 0);
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 9ffa90c6201c..014a1cfc41c5 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -108,6 +108,8 @@ source "drivers/block/paride/Kconfig"
source "drivers/block/mtip32xx/Kconfig"
+source "drivers/block/zram/Kconfig"
+
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS && 0
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 816d979c3266..02b688d1438d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+obj-$(CONFIG_ZRAM) += zram/
nvme-y := nvme-core.o nvme-scsi.o
skd-y := skd_main.o
diff --git a/drivers/staging/zram/Kconfig b/drivers/block/zram/Kconfig
index 983314c41349..3450be850399 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -14,7 +14,6 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
- Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/block/zram/Makefile
index cb0f9ced6a93..cb0f9ced6a93 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/block/zram/Makefile
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 108f2733106d..011e55d820b1 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@ static ssize_t zero_pages_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
+ return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}
static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t orig_data_size_show(struct device *dev,
struct zram *zram = dev_to_zram(dev);
return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+ (u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t compr_data_size_show(struct device *dev,
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
return sprintf(buf, "%llu\n", val);
}
+/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
@@ -228,6 +229,8 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
goto free_table;
}
+ rwlock_init(&meta->tb_lock);
+ mutex_init(&meta->buffer_lock);
return meta;
free_table:
@@ -280,6 +283,7 @@ static void handle_zero_page(struct bio_vec *bvec)
flush_dcache_page(page);
}
+/* NOTE: caller should hold meta->tb_lock with write-side */
static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
@@ -293,21 +297,21 @@ static void zram_free_page(struct zram *zram, size_t index)
*/
if (zram_test_flag(meta, index, ZRAM_ZERO)) {
zram_clear_flag(meta, index, ZRAM_ZERO);
- zram->stats.pages_zero--;
+ atomic_dec(&zram->stats.pages_zero);
}
return;
}
if (unlikely(size > max_zpage_size))
- zram->stats.bad_compress--;
+ atomic_dec(&zram->stats.bad_compress);
zs_free(meta->mem_pool, handle);
if (size <= PAGE_SIZE / 2)
- zram->stats.good_compress--;
+ atomic_dec(&zram->stats.good_compress);
atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
- zram->stats.pages_stored--;
+ atomic_dec(&zram->stats.pages_stored);
meta->table[index].handle = 0;
meta->table[index].size = 0;
@@ -319,20 +323,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
size_t clen = PAGE_SIZE;
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
- unsigned long handle = meta->table[index].handle;
+ unsigned long handle;
+ u16 size;
+
+ read_lock(&meta->tb_lock);
+ handle = meta->table[index].handle;
+ size = meta->table[index].size;
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ read_unlock(&meta->tb_lock);
clear_page(mem);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (meta->table[index].size == PAGE_SIZE)
+ if (size == PAGE_SIZE)
copy_page(mem, cmem);
else
- ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
- mem, &clen);
+ ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
zs_unmap_object(meta->mem_pool, handle);
+ read_unlock(&meta->tb_lock);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -353,11 +363,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
+ read_lock(&meta->tb_lock);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
+ read_unlock(&meta->tb_lock);
handle_zero_page(bvec);
return 0;
}
+ read_unlock(&meta->tb_lock);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -400,6 +413,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
+ bool locked = false;
page = bvec->bv_page;
src = meta->compress_buffer;
@@ -419,6 +433,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
+ mutex_lock(&meta->buffer_lock);
+ locked = true;
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
@@ -433,25 +449,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
+ write_lock(&zram->meta->tb_lock);
zram_free_page(zram, index);
-
- zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
+ write_unlock(&zram->meta->tb_lock);
+
+ atomic_inc(&zram->stats.pages_zero);
ret = 0;
goto out;
}
- /*
- * zram_slot_free_notify could miss free so that let's
- * double check.
- */
- if (unlikely(meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO)))
- zram_free_page(zram, index);
-
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
-
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
@@ -464,7 +473,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (unlikely(clen > max_zpage_size)) {
- zram->stats.bad_compress++;
+ atomic_inc(&zram->stats.bad_compress);
clen = PAGE_SIZE;
src = NULL;
if (is_partial_io(bvec))
@@ -494,18 +503,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* Free memory associated with this sector
* before overwriting unused sectors.
*/
+ write_lock(&zram->meta->tb_lock);
zram_free_page(zram, index);
meta->table[index].handle = handle;
meta->table[index].size = clen;
+ write_unlock(&zram->meta->tb_lock);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_size);
- zram->stats.pages_stored++;
+ atomic_inc(&zram->stats.pages_stored);
if (clen <= PAGE_SIZE / 2)
- zram->stats.good_compress++;
+ atomic_inc(&zram->stats.good_compress);
out:
+ if (locked)
+ mutex_unlock(&meta->buffer_lock);
if (is_partial_io(bvec))
kfree(uncmem);
@@ -514,36 +527,15 @@ out:
return ret;
}
-static void handle_pending_slot_free(struct zram *zram)
-{
- struct zram_slot_free *free_rq;
-
- spin_lock(&zram->slot_free_lock);
- while (zram->slot_free_rq) {
- free_rq = zram->slot_free_rq;
- zram->slot_free_rq = free_rq->next;
- zram_free_page(zram, free_rq->index);
- kfree(free_rq);
- }
- spin_unlock(&zram->slot_free_lock);
-}
-
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
int ret;
- if (rw == READ) {
- down_read(&zram->lock);
- handle_pending_slot_free(zram);
+ if (rw == READ)
ret = zram_bvec_read(zram, bvec, index, offset, bio);
- up_read(&zram->lock);
- } else {
- down_write(&zram->lock);
- handle_pending_slot_free(zram);
+ else
ret = zram_bvec_write(zram, bvec, index, offset);
- up_write(&zram->lock);
- }
return ret;
}
@@ -553,8 +545,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
size_t index;
struct zram_meta *meta;
- flush_work(&zram->free_work);
-
down_write(&zram->init_lock);
if (!zram->init_done) {
up_write(&zram->init_lock);
@@ -762,40 +752,19 @@ error:
bio_io_error(bio);
}
-static void zram_slot_free(struct work_struct *work)
-{
- struct zram *zram;
-
- zram = container_of(work, struct zram, free_work);
- down_write(&zram->lock);
- handle_pending_slot_free(zram);
- up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
- spin_lock(&zram->slot_free_lock);
- free_rq->next = zram->slot_free_rq;
- zram->slot_free_rq = free_rq;
- spin_unlock(&zram->slot_free_lock);
-}
-
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
- struct zram_slot_free *free_rq;
+ struct zram_meta *meta;
zram = bdev->bd_disk->private_data;
- atomic64_inc(&zram->stats.notify_free);
-
- free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
- if (!free_rq)
- return;
+ meta = zram->meta;
- free_rq->index = index;
- add_slot_free(zram, free_rq);
- schedule_work(&zram->free_work);
+ write_lock(&meta->tb_lock);
+ zram_free_page(zram, index);
+ write_unlock(&meta->tb_lock);
+ atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
@@ -839,13 +808,8 @@ static int create_device(struct zram *zram, int device_id)
{
int ret = -ENOMEM;
- init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
- INIT_WORK(&zram->free_work, zram_slot_free);
- spin_lock_init(&zram->slot_free_lock);
- zram->slot_free_rq = NULL;
-
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 97a3acf6ab76..ad8aa35bae00 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -2,6 +2,7 @@
* Compressed RAM block device
*
* Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ * 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
* Released under the terms of 3-clause BSD License
* Released under the terms of GNU General Public License Version 2.0
*
- * Project home: http://compcache.googlecode.com
*/
#ifndef _ZRAM_DRV_H_
@@ -17,8 +17,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
-
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/zsmalloc.h>
/*
* Some arbitrary value. This is just to catch
@@ -69,10 +68,6 @@ struct table {
u8 flags;
} __aligned(4);
-/*
- * All 64bit fields should only be manipulated by 64bit atomic accessors.
- * All modifications to 32bit counter should be protected by zram->lock.
- */
struct zram_stats {
atomic64_t compr_size; /* compressed size of pages stored */
atomic64_t num_reads; /* failed + successful */
@@ -81,33 +76,23 @@ struct zram_stats {
atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */
- u32 pages_zero; /* no. of zero filled pages */
- u32 pages_stored; /* no. of pages currently stored */
- u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 bad_compress; /* % of pages with compression ratio>=75% */
+ atomic_t pages_zero; /* no. of zero filled pages */
+ atomic_t pages_stored; /* no. of pages currently stored */
+ atomic_t good_compress; /* % of pages with compression ratio<=50% */
+ atomic_t bad_compress; /* % of pages with compression ratio>=75% */
};
struct zram_meta {
+ rwlock_t tb_lock; /* protect table */
void *compress_workmem;
void *compress_buffer;
struct table *table;
struct zs_pool *mem_pool;
-};
-
-struct zram_slot_free {
- unsigned long index;
- struct zram_slot_free *next;
+ struct mutex buffer_lock; /* protect compress buffers */
};
struct zram {
struct zram_meta *meta;
- struct rw_semaphore lock; /* protect compression buffers, table,
- * 32bit stat counters against concurrent
- * notifications, reads and writes */
-
- struct work_struct free_work; /* handle pending free request */
- struct zram_slot_free *slot_free_rq; /* list head of free request */
-
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -118,7 +103,6 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
- spinlock_t slot_free_lock;
struct zram_stats stats;
};
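
The stats conversion above trades lock-protected u32 counters for atomic_t, which is what lets the coarse zram->lock rw-semaphore go away. A user-space sketch of the same idea with C11 atomics (illustrative only; the driver uses the kernel's atomic_t API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* The old u32 counters were only safe with zram->lock held; an
     * atomic counter needs no external lock for inc/dec. */
    static atomic_int pages_stored;

    static void store_page(void) { atomic_fetch_add(&pages_stored, 1); }
    static void drop_page(void)  { atomic_fetch_sub(&pages_stored, 1); }

    int main(void)
    {
            store_page();
            store_page();
            drop_page();
            printf("%d\n", atomic_load(&pages_stored));  /* prints 1 */
            return 0;
    }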
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 930694d3a13f..71e49000fbf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -150,6 +150,7 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
+ put_device(&bus->dev);
return -EINVAL;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4bb6b11166b3..040a51525b42 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -76,10 +76,6 @@ source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/zsmalloc/Kconfig"
-
-source "drivers/staging/zram/Kconfig"
-
source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9f07e5e16094..dea056bf7ff2 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -32,8 +32,6 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_ZRAM) += zram/
-obj-$(CONFIG_ZSMALLOC) += zsmalloc/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_FB_SM7XX) += sm7xxfb/
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
deleted file mode 100644
index 9d1f2a24ad62..000000000000
--- a/drivers/staging/zsmalloc/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-config ZSMALLOC
- bool "Memory allocator for compressed pages"
- depends on MMU
- default n
- help
- zsmalloc is a slab-based memory allocator designed to store
- compressed RAM pages. zsmalloc uses virtual memory mapping
- in order to reduce fragmentation. However, this results in a
- non-standard allocator interface where a handle, not a pointer, is
- returned by an alloc(). This handle must be mapped in order to
- access the allocated space.
-
-config PGTABLE_MAPPING
- bool "Use page table mapping to access object in zsmalloc"
- depends on ZSMALLOC
- help
- By default, zsmalloc uses a copy-based object mapping method to
- access allocations that span two pages. However, if a particular
- architecture (ex, ARM) performs VM mapping faster than copying,
- then you should select this. This causes zsmalloc to use page table
- mapping rather than copying for object mapping.
-
- You can check speed with zsmalloc benchmark[1].
- [1] https://github.com/spartacus06/zsmalloc
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
deleted file mode 100644
index b134848a590d..000000000000
--- a/drivers/staging/zsmalloc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zsmalloc-y := zsmalloc-main.o
-
-obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 93cf15efc717..7de847df224f 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -228,7 +228,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
rc = device_register(&new_ld->dev);
if (rc) {
- kfree(new_ld);
+ put_device(&new_ld->dev);
return ERR_PTR(rc);
}
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 590bd8a7cd1b..32be1a1a3e9c 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -90,9 +90,8 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
err = device_register(&dev->dev);
if (err) {
printk(KERN_ERR "Failed to register master device. err=%d\n", err);
- memset(dev, 0, sizeof(struct w1_master));
- kfree(dev);
- dev = NULL;
+ put_device(&dev->dev);
+ return NULL;
}
return dev;
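
The mdio_bus.c, lcd.c and w1_int.c hunks all fix the same error path: once device_register() has been called, the embedded kobject may already hold references, so the caller must drop its own reference with put_device() and let the release callback free the memory, rather than calling kfree() directly. A kernel-style sketch of the pattern, under the assumption of a hypothetical struct my_dev / my_release() (names not from this patch):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_dev {
            struct device dev;
    };

    static void my_release(struct device *d)
    {
            kfree(container_of(d, struct my_dev, dev));  /* freed exactly once */
    }

    static struct my_dev *my_alloc(void)
    {
            struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);

            if (!md)
                    return NULL;
            md->dev.release = my_release;
            if (device_register(&md->dev)) {
                    /* device_register() may already have taken references;
                     * kfree(md) here could free memory still reachable by
                     * others. Drop our reference and let my_release() run. */
                    put_device(&md->dev);
                    return NULL;
            }
            return md;
    }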
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0375654adb28..8678c4322b44 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
* as well!
*/
struct request {
- union {
- struct list_head queuelist;
- struct llist_node ll_list;
- };
+ struct list_head queuelist;
union {
struct call_single_data csd;
struct work_struct mq_flush_data;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index db43b58a3355..0053adde0ed9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -360,7 +360,7 @@ enum
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
*/
-extern char *softirq_to_name[NR_SOFTIRQS];
+extern const char * const softirq_to_name[NR_SOFTIRQS];
/* softirq mask and active fields moved to irq_cpustat_t in
* asm/hardirq.h to get better cache usage. KAO
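
What the two const qualifiers buy: neither the pointers in the table nor the strings they reference can be modified, so the whole array can live in read-only data. A small stand-alone illustration:

    #include <stdio.h>

    static const char * const names[] = { "HI", "TIMER" };

    int main(void)
    {
            printf("%s\n", names[0]);
            /* names[0] = "NET_TX"; -- error: the pointers are const */
            /* names[0][0] = 'h';   -- error: the strings are const  */
            return 0;
    }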
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..3834f43f9993 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,16 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/llist.h>
extern void cpu_idle(void);
typedef void (*smp_call_func_t)(void *info);
struct call_single_data {
- struct list_head list;
+ union {
+ struct list_head list;
+ struct llist_node llist;
+ };
smp_call_func_t func;
void *info;
u16 flags;
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/include/linux/zsmalloc.h
index c2eb174b97ee..e44d634e7fb7 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -2,6 +2,7 @@
* zsmalloc memory allocator
*
* Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the license that better fits your requirements.
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 00232e288f61..7d76c6621734 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
size_t r;
va_start(args, fmt);
- r = vsnprintf(buf, sizeof(buf), fmt, args);
+ r = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
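
The kexec fix matters because vsnprintf() returns the length the output would have had, while vscnprintf() returns the number of bytes actually stored, so the old code could advance vmcoreinfo_size past the end of buf on truncation. The same hazard, shown with user-space snprintf():

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8];
            int r = snprintf(buf, sizeof(buf), "0123456789");

            /* snprintf (like the kernel's vsnprintf) reports the length
             * the output WOULD have had, not what was stored: */
            printf("returned %d, stored %zu\n", r, strlen(buf)); /* 10 vs 7 */

            /* Using r as a write index therefore overruns the data really
             * present; the kernel's vscnprintf() instead returns the
             * stored length, i.e. min(r, size - 1) here. */
            return 0;
    }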
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..ffee35bef179 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,17 +23,11 @@ enum {
struct call_function_data {
struct call_single_data __percpu *csd;
cpumask_var_t cpumask;
- cpumask_var_t cpumask_ipi;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
-struct call_single_queue {
- struct list_head list;
- raw_spinlock_t lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -47,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
cpu_to_node(cpu)))
return notifier_from_errno(-ENOMEM);
- if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
- cpu_to_node(cpu))) {
- free_cpumask_var(cfd->cpumask);
- return notifier_from_errno(-ENOMEM);
- }
cfd->csd = alloc_percpu(struct call_single_data);
if (!cfd->csd) {
- free_cpumask_var(cfd->cpumask_ipi);
free_cpumask_var(cfd->cpumask);
return notifier_from_errno(-ENOMEM);
}
@@ -67,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DEAD:
case CPU_DEAD_FROZEN:
free_cpumask_var(cfd->cpumask);
- free_cpumask_var(cfd->cpumask_ipi);
free_percpu(cfd->csd);
break;
#endif
@@ -85,12 +72,8 @@ void __init call_function_init(void)
void *cpu = (void *)(long)smp_processor_id();
int i;
- for_each_possible_cpu(i) {
- struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
- raw_spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->list);
- }
+ for_each_possible_cpu(i)
+ init_llist_head(&per_cpu(call_single_queue, i));
hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@ static void csd_unlock(struct call_single_data *csd)
*/
static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
- struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
- unsigned long flags;
- int ipi;
-
if (wait)
csd->flags |= CSD_FLAG_WAIT;
- raw_spin_lock_irqsave(&dst->lock, flags);
- ipi = list_empty(&dst->list);
- list_add_tail(&csd->list, &dst->list);
- raw_spin_unlock_irqrestore(&dst->lock, flags);
-
/*
* The list addition should be visible before sending the IPI
* handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
* locking and barrier primitives. Generic code isn't really
* equipped to do the right thing...
*/
- if (ipi)
+ if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
arch_send_call_function_single_ipi(cpu);
if (wait)
@@ -177,27 +151,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
*/
void generic_smp_call_function_single_interrupt(void)
{
- struct call_single_queue *q = &__get_cpu_var(call_single_queue);
- LIST_HEAD(list);
+ struct llist_node *entry, *next;
/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
*/
WARN_ON_ONCE(!cpu_online(smp_processor_id()));
- raw_spin_lock(&q->lock);
- list_replace_init(&q->list, &list);
- raw_spin_unlock(&q->lock);
+ entry = llist_del_all(&__get_cpu_var(call_single_queue));
+ entry = llist_reverse_order(entry);
- while (!list_empty(&list)) {
+ while (entry) {
struct call_single_data *csd;
- csd = list_entry(list.next, struct call_single_data, list);
- list_del(&csd->list);
+ next = entry->next;
+ csd = llist_entry(entry, struct call_single_data, llist);
csd->func(csd->info);
-
csd_unlock(csd);
+
+ entry = next;
}
}
@@ -402,30 +375,17 @@ void smp_call_function_many(const struct cpumask *mask,
if (unlikely(!cpumask_weight(cfd->cpumask)))
return;
- /*
- * After we put an entry into the list, cfd->cpumask may be cleared
- * again when another CPU sends another IPI for a SMP function call, so
- * cfd->cpumask will be zero.
- */
- cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
for_each_cpu(cpu, cfd->cpumask) {
struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
- struct call_single_queue *dst =
- &per_cpu(call_single_queue, cpu);
- unsigned long flags;
csd_lock(csd);
csd->func = func;
csd->info = info;
-
- raw_spin_lock_irqsave(&dst->lock, flags);
- list_add_tail(&csd->list, &dst->list);
- raw_spin_unlock_irqrestore(&dst->lock, flags);
+ llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
}
/* Send a message to all CPUs in the map */
- arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+ arch_send_call_function_ipi_mask(cfd->cpumask);
if (wait) {
for_each_cpu(cpu, cfd->cpumask) {
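
kernel/smp.c now queues call_single_data entries on a lock-free llist: producers push with llist_add(), whose return value says whether the list was empty (the cue for sending an IPI), and the interrupt handler takes the whole list in one atomic exchange with llist_del_all(), reversing it to restore FIFO order. A user-space sketch of that pattern with C11 atomics (the kernel uses its llist primitives, not this code):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; int val; };

    static _Atomic(struct node *) head;

    /* ~ llist_add(): lock-free LIFO push; nonzero means the list was
     * empty, mirroring the "send the IPI" test in generic_exec_single(). */
    static int push(struct node *n)
    {
            struct node *old = atomic_load(&head);

            do {
                    n->next = old;
            } while (!atomic_compare_exchange_weak(&head, &old, n));
            return old == NULL;
    }

    /* ~ llist_del_all(): one atomic exchange empties the queue. */
    static struct node *del_all(void)
    {
            return atomic_exchange(&head, NULL);
    }

    /* ~ llist_reverse_order(): re-order the LIFO chain to FIFO. */
    static struct node *reverse(struct node *n)
    {
            struct node *rev = NULL;

            while (n) {
                    struct node *next = n->next;

                    n->next = rev;
                    rev = n;
                    n = next;
            }
            return rev;
    }

    int main(void)
    {
            struct node a = { .val = 1 }, b = { .val = 2 };

            push(&a);
            push(&b);
            for (struct node *n = reverse(del_all()); n; n = n->next)
                    printf("%d\n", n->val);  /* 1 then 2 */
            return 0;
    }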
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8a1e6e104892..850967068aaf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -8,6 +8,8 @@
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
@@ -136,7 +138,6 @@ void _local_bh_enable(void)
WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
-
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
@@ -153,7 +154,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
/*
* Keep preemption disabled until we are done with
* softirq processing:
- */
+ */
preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) {
@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void)
struct softirq_action *h;
bool in_hardirq;
__u32 pending;
+ int softirq_bit;
int cpu;
/*
@@ -253,30 +255,30 @@ restart:
h = softirq_vec;
- do {
- if (pending & 1) {
- unsigned int vec_nr = h - softirq_vec;
- int prev_count = preempt_count();
-
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
- h->action(h);
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- printk(KERN_ERR "huh, entered softirq %u %s %p"
- "with preempt_count %08x,"
- " exited with %08x?\n", vec_nr,
- softirq_to_name[vec_nr], h->action,
- prev_count, preempt_count());
- preempt_count_set(prev_count);
- }
+ while ((softirq_bit = ffs(pending))) {
+ unsigned int vec_nr;
+ int prev_count;
- rcu_bh_qs(cpu);
+ h += softirq_bit - 1;
+
+ vec_nr = h - softirq_vec;
+ prev_count = preempt_count();
+
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+ h->action(h);
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+ vec_nr, softirq_to_name[vec_nr], h->action,
+ prev_count, preempt_count());
+ preempt_count_set(prev_count);
}
+ rcu_bh_qs(cpu);
h++;
- pending >>= 1;
- } while (pending);
+ pending >>= softirq_bit;
+ }
local_irq_disable();
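
The rewritten __do_softirq() loop no longer tests one bit per iteration; ffs() jumps straight to the next pending softirq. A stand-alone illustration of the traversal:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
            unsigned int pending = 0x29;    /* bits 0, 3 and 5 set */
            int vec = 0, bit;

            while ((bit = ffs(pending))) {
                    vec += bit - 1;     /* mirrors: h += softirq_bit - 1 */
                    printf("run softirq %d\n", vec);
                    vec++;              /* mirrors: h++ */
                    pending >>= bit;    /* mirrors: pending >>= softirq_bit */
            }
            return 0;                   /* prints 0, 3, 5 */
    }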
@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
/*
* Tasklets
*/
-struct tasklet_head
-{
+struct tasklet_head {
struct tasklet_struct *head;
struct tasklet_struct **tail;
};
@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
__this_cpu_write(tasklet_hi_vec.head, t);
__raise_softirq_irqoff(HI_SOFTIRQ);
}
-
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a)
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+ &t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a)
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+ &t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a)
}
}
-
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t,
t->func = func;
t->data = data;
}
-
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
if (in_interrupt())
- printk("Attempt to kill tasklet from interrupt\n");
+ pr_notice("Attempt to kill tasklet from interrupt\n");
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t)
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
}
-
EXPORT_SYMBOL(tasklet_kill);
/*
@@ -727,9 +724,8 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
{
switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/mm/Kconfig b/mm/Kconfig
index 723bbe04a0b0..2d9f1504d75e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -552,3 +552,28 @@ config MEM_SOFT_DIRTY
it can be cleared by hand.
See Documentation/vm/soft-dirty.txt for more details.
+
+config ZSMALLOC
+ bool "Memory allocator for compressed pages"
+ depends on MMU
+ default n
+ help
+ zsmalloc is a slab-based memory allocator designed to store
+ compressed RAM pages. zsmalloc uses virtual memory mapping
+ in order to reduce fragmentation. However, this results in a
+ non-standard allocator interface where a handle, not a pointer, is
+ returned by an alloc(). This handle must be mapped in order to
+ access the allocated space.
+
+config PGTABLE_MAPPING
+ bool "Use page table mapping to access object in zsmalloc"
+ depends on ZSMALLOC
+ help
+ By default, zsmalloc uses a copy-based object mapping method to
+ access allocations that span two pages. However, if a particular
+ architecture (ex, ARM) performs VM mapping faster than copying,
+ then you should select this. This causes zsmalloc to use page table
+ mapping rather than copying for object mapping.
+
+ You can check speed with zsmalloc benchmark[1].
+ [1] https://github.com/spartacus06/zsmalloc
diff --git a/mm/Makefile b/mm/Makefile
index 305d10acd081..310c90a09264 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_ZBUD) += zbud.o
+obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index ce682f7a4f29..4ee1d64b52a2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -234,11 +234,46 @@ static ssize_t stable_pages_required_show(struct device *dev,
}
static DEVICE_ATTR_RO(stable_pages_required);
+static ssize_t strictlimit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct backing_dev_info *bdi = dev_get_drvdata(dev);
+ unsigned int val;
+ ssize_t ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
+ break;
+ case 1:
+ bdi->capabilities |= BDI_CAP_STRICTLIMIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+static ssize_t strictlimit_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct backing_dev_info *bdi = dev_get_drvdata(dev);
+
+ return snprintf(page, PAGE_SIZE-1, "%d\n",
+ !!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
+}
+static DEVICE_ATTR_RW(strictlimit);
+
static struct attribute *bdi_dev_attrs[] = {
&dev_attr_read_ahead_kb.attr,
&dev_attr_min_ratio.attr,
&dev_attr_max_ratio.attr,
&dev_attr_stable_pages_required.attr,
+ &dev_attr_strictlimit.attr,
NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
diff --git a/mm/migrate.c b/mm/migrate.c
index 734704f6f29b..482a33d89134 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
__GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN) &
~GFP_IOFS, 0);
- if (newpage)
- page_cpupid_xchg_last(newpage, page_cpupid_last(page));
return newpage;
}
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/mm/zsmalloc.c
index 7660c87d8b2a..c03ca5e9fe15 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/mm/zsmalloc.c
@@ -2,6 +2,7 @@
* zsmalloc memory allocator
*
* Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
*
* This code is released using a dual license strategy: BSD/GPL
* You can choose the license that better fits your requirements.
@@ -90,8 +91,7 @@
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-
-#include "zsmalloc.h"
+#include <linux/zsmalloc.h>
/*
* This must be a power of 2 and greater than or equal to sizeof(link_free).
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 05c99c0b7e6c..be53ba79e09f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2665,6 +2665,15 @@ sub process {
$herecurr);
}
+# check for function declarations without arguments like "int foo()"
+ if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
+ if (ERROR("FUNCTION_WITHOUT_ARGS",
+ "Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
+ $fix) {
+ $fixed[$linenr - 1] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
+ }
+ }
+
# check for uses of DEFINE_PCI_DEVICE_TABLE
if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
if (WARN("DEFINE_PCI_DEVICE_TABLE",
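
On the checkpatch addition: in C (unlike C++), a declaration such as int foo() leaves the argument list unspecified, so the compiler cannot check call sites, whereas int foo(void) declares a function taking no arguments. A small demonstration:

    /* With an unspecified argument list, any call compiles: */
    int foo();

    int main(void)
    {
            foo(1, 2, 3);   /* accepted: () means "unspecified", not "none" */
            return 0;
    }

    int foo() { return 0; }

    /* Declared as "int foo(void)", the call above would be rejected at
     * compile time -- which is what the new FUNCTION_WITHOUT_ARGS check
     * nudges kernel code toward. */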