Diffstat (limited to 'mm/zswap.c')
 mm/zswap.c | 309 ++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 160 insertions(+), 149 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 275b22cc8df4..eedc27894b10 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -76,9 +76,17 @@ static u64 zswap_duplicate_entry;
* tunables
**********************************/
+#define ZSWAP_PARAM_UNSET ""
+
/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
-module_param_named(enabled, zswap_enabled, bool, 0644);
+static int zswap_enabled_param_set(const char *,
+ const struct kernel_param *);
+static struct kernel_param_ops zswap_enabled_param_ops = {
+ .set = zswap_enabled_param_set,
+ .get = param_get_bool,
+};
+module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
/* Crypto compressor to use */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -118,7 +126,7 @@ struct zswap_pool {
struct kref kref;
struct list_head list;
struct work_struct work;
- struct notifier_block notifier;
+ struct hlist_node node;
char tfm_name[CRYPTO_MAX_ALG_NAME];
};
@@ -176,6 +184,12 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
/* used by param callback function */
static bool zswap_init_started;
+/* fatal error during init */
+static bool zswap_init_failed;
+
+/* init completed, but couldn't create the initial pool */
+static bool zswap_has_pool;
+
/*********************************
* helpers and fwd declarations
**********************************/
@@ -352,143 +366,58 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
-static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
+static int zswap_dstmem_prepare(unsigned int cpu)
{
u8 *dst;
- switch (action) {
- case CPU_UP_PREPARE:
- dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!dst) {
- pr_err("can't allocate compressor buffer\n");
- return NOTIFY_BAD;
- }
- per_cpu(zswap_dstmem, cpu) = dst;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- dst = per_cpu(zswap_dstmem, cpu);
- kfree(dst);
- per_cpu(zswap_dstmem, cpu) = NULL;
- break;
- default:
- break;
+ dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ if (!dst) {
+ pr_err("can't allocate compressor buffer\n");
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ per_cpu(zswap_dstmem, cpu) = dst;
+ return 0;
}
-static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
+static int zswap_dstmem_dead(unsigned int cpu)
{
- return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
-}
+ u8 *dst;
-static struct notifier_block zswap_dstmem_notifier = {
- .notifier_call = zswap_cpu_dstmem_notifier,
-};
+ dst = per_cpu(zswap_dstmem, cpu);
+ kfree(dst);
+ per_cpu(zswap_dstmem, cpu) = NULL;
-static int __init zswap_cpu_dstmem_init(void)
-{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
- NOTIFY_BAD)
- goto cleanup;
- __register_cpu_notifier(&zswap_dstmem_notifier);
- cpu_notifier_register_done();
return 0;
-
-cleanup:
- for_each_online_cpu(cpu)
- __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
-}
-
-static void zswap_cpu_dstmem_destroy(void)
-{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&zswap_dstmem_notifier);
- cpu_notifier_register_done();
}
-static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
- unsigned long action, unsigned long cpu)
+static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
+ struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_comp *tfm;
- switch (action) {
- case CPU_UP_PREPARE:
- if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
- break;
- tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
- if (IS_ERR_OR_NULL(tfm)) {
- pr_err("could not alloc crypto comp %s : %ld\n",
- pool->tfm_name, PTR_ERR(tfm));
- return NOTIFY_BAD;
- }
- *per_cpu_ptr(pool->tfm, cpu) = tfm;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- tfm = *per_cpu_ptr(pool->tfm, cpu);
- if (!IS_ERR_OR_NULL(tfm))
- crypto_free_comp(tfm);
- *per_cpu_ptr(pool->tfm, cpu) = NULL;
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static int zswap_cpu_comp_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
-{
- unsigned long cpu = (unsigned long)pcpu;
- struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
-
- return __zswap_cpu_comp_notifier(pool, action, cpu);
-}
+ if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
+ return 0;
-static int zswap_cpu_comp_init(struct zswap_pool *pool)
-{
- unsigned long cpu;
-
- memset(&pool->notifier, 0, sizeof(pool->notifier));
- pool->notifier.notifier_call = zswap_cpu_comp_notifier;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
- NOTIFY_BAD)
- goto cleanup;
- __register_cpu_notifier(&pool->notifier);
- cpu_notifier_register_done();
+ tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
+ if (IS_ERR_OR_NULL(tfm)) {
+ pr_err("could not alloc crypto comp %s : %ld\n",
+ pool->tfm_name, PTR_ERR(tfm));
+ return -ENOMEM;
+ }
+ *per_cpu_ptr(pool->tfm, cpu) = tfm;
return 0;
-
-cleanup:
- for_each_online_cpu(cpu)
- __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
}
-static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
+static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu;
+ struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+ struct crypto_comp *tfm;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&pool->notifier);
- cpu_notifier_register_done();
+ tfm = *per_cpu_ptr(pool->tfm, cpu);
+ if (!IS_ERR_OR_NULL(tfm))
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(pool->tfm, cpu) = NULL;
+ return 0;
}
/*********************************
@@ -500,7 +429,8 @@ static struct zswap_pool *__zswap_pool_current(void)
struct zswap_pool *pool;
pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
- WARN_ON(!pool);
+ WARN_ONCE(!pool && zswap_has_pool,
+ "%s: no page storage pool!\n", __func__);
return pool;
}
@@ -519,7 +449,7 @@ static struct zswap_pool *zswap_pool_current_get(void)
rcu_read_lock();
pool = __zswap_pool_current();
- if (!pool || !zswap_pool_get(pool))
+ if (!zswap_pool_get(pool))
pool = NULL;
rcu_read_unlock();
@@ -535,7 +465,9 @@ static struct zswap_pool *zswap_pool_last_get(void)
list_for_each_entry_rcu(pool, &zswap_pools, list)
last = pool;
- if (!WARN_ON(!last) && !zswap_pool_get(last))
+ WARN_ONCE(!last && zswap_has_pool,
+ "%s: no page storage pool!\n", __func__);
+ if (!zswap_pool_get(last))
last = NULL;
rcu_read_unlock();
@@ -569,6 +501,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
struct zswap_pool *pool;
char name[38]; /* 'zswap' + 32 char (max) num + \0 */
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ int ret;
+
+ if (!zswap_has_pool) {
+ /* if either are unset, pool initialization failed, and we
+ * need both params to be set correctly before trying to
+ * create a pool.
+ */
+ if (!strcmp(type, ZSWAP_PARAM_UNSET))
+ return NULL;
+ if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
+ return NULL;
+ }
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool) {
@@ -593,7 +537,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
goto error;
}
- if (zswap_cpu_comp_init(pool))
+ ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
+ &pool->node);
+ if (ret)
goto error;
pr_debug("using %s compressor\n", pool->tfm_name);
@@ -617,29 +563,41 @@ error:
static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
- if (!crypto_has_comp(zswap_compressor, 0, 0)) {
- if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
- pr_err("default compressor %s not available\n",
- zswap_compressor);
- return NULL;
- }
+ bool has_comp, has_zpool;
+
+ has_comp = crypto_has_comp(zswap_compressor, 0, 0);
+ if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
pr_err("compressor %s not available, using default %s\n",
zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
param_free_charp(&zswap_compressor);
zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+ has_comp = crypto_has_comp(zswap_compressor, 0, 0);
}
- if (!zpool_has_pool(zswap_zpool_type)) {
- if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
- pr_err("default zpool %s not available\n",
- zswap_zpool_type);
- return NULL;
- }
+ if (!has_comp) {
+ pr_err("default compressor %s not available\n",
+ zswap_compressor);
+ param_free_charp(&zswap_compressor);
+ zswap_compressor = ZSWAP_PARAM_UNSET;
+ }
+
+ has_zpool = zpool_has_pool(zswap_zpool_type);
+ if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
pr_err("zpool %s not available, using default %s\n",
zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
param_free_charp(&zswap_zpool_type);
zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+ has_zpool = zpool_has_pool(zswap_zpool_type);
+ }
+ if (!has_zpool) {
+ pr_err("default zpool %s not available\n",
+ zswap_zpool_type);
+ param_free_charp(&zswap_zpool_type);
+ zswap_zpool_type = ZSWAP_PARAM_UNSET;
}
+ if (!has_comp || !has_zpool)
+ return NULL;
+
return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}
@@ -647,7 +605,7 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
{
zswap_pool_debug("destroying", pool);
- zswap_cpu_comp_destroy(pool);
+ cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
free_percpu(pool->tfm);
zpool_destroy_pool(pool->zpool);
kfree(pool);
@@ -655,6 +613,9 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
+ if (!pool)
+ return 0;
+
return kref_get_unless_zero(&pool->kref);
}
@@ -706,8 +667,13 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
char *s = strstrip((char *)val);
int ret;
+ if (zswap_init_failed) {
+ pr_err("can't set param, initialization failed\n");
+ return -ENODEV;
+ }
+
/* no change required */
- if (!strcmp(s, *(char **)kp->arg))
+ if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
return 0;
/* if this is load-time (pre-init) param setting,
@@ -738,21 +704,26 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
pool = zswap_pool_find_get(type, compressor);
if (pool) {
zswap_pool_debug("using existing", pool);
+ WARN_ON(pool == zswap_pool_current());
list_del_rcu(&pool->list);
- } else {
- spin_unlock(&zswap_pools_lock);
- pool = zswap_pool_create(type, compressor);
- spin_lock(&zswap_pools_lock);
}
+ spin_unlock(&zswap_pools_lock);
+
+ if (!pool)
+ pool = zswap_pool_create(type, compressor);
+
if (pool)
ret = param_set_charp(s, kp);
else
ret = -EINVAL;
+ spin_lock(&zswap_pools_lock);
+
if (!ret) {
put_pool = zswap_pool_current();
list_add_rcu(&pool->list, &zswap_pools);
+ zswap_has_pool = true;
} else if (pool) {
/* add the possibly pre-existing pool to the end of the pools
* list; if it's new (and empty) then it'll be removed and
@@ -764,6 +735,17 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
spin_unlock(&zswap_pools_lock);
+ if (!zswap_has_pool && !pool) {
+ /* if initial pool creation failed, and this pool creation also
+ * failed, maybe both compressor and zpool params were bad.
+ * Allow changing this param, so pool creation will succeed
+ * when the other param is changed. We already verified this
+ * param is ok in the zpool_has_pool() or crypto_has_comp()
+ * checks above.
+ */
+ ret = param_set_charp(s, kp);
+ }
+
/* drop the ref from either the old current pool,
* or the new pool we failed to add
*/
@@ -785,6 +767,21 @@ static int zswap_zpool_param_set(const char *val,
return __zswap_param_set(val, kp, NULL, zswap_compressor);
}
+static int zswap_enabled_param_set(const char *val,
+ const struct kernel_param *kp)
+{
+ if (zswap_init_failed) {
+ pr_err("can't enable, initialization failed\n");
+ return -ENODEV;
+ }
+ if (!zswap_has_pool && zswap_init_started) {
+ pr_err("can't enable, no pool configured\n");
+ return -ENODEV;
+ }
+
+ return param_set_bool(val, kp);
+}
+
/*********************************
* writeback code
**********************************/
@@ -1238,6 +1235,7 @@ static void __exit zswap_debugfs_exit(void) { }
static int __init init_zswap(void)
{
struct zswap_pool *pool;
+ int ret;
zswap_init_started = true;
@@ -1246,31 +1244,44 @@ static int __init init_zswap(void)
goto cache_fail;
}
- if (zswap_cpu_dstmem_init()) {
+ ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
+ zswap_dstmem_prepare, zswap_dstmem_dead);
+ if (ret) {
pr_err("dstmem alloc failed\n");
goto dstmem_fail;
}
+ ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
+ "mm/zswap_pool:prepare",
+ zswap_cpu_comp_prepare,
+ zswap_cpu_comp_dead);
+ if (ret)
+ goto hp_fail;
+
pool = __zswap_pool_create_fallback();
- if (!pool) {
+ if (pool) {
+ pr_info("loaded using pool %s/%s\n", pool->tfm_name,
+ zpool_get_type(pool->zpool));
+ list_add(&pool->list, &zswap_pools);
+ zswap_has_pool = true;
+ } else {
pr_err("pool creation failed\n");
- goto pool_fail;
+ zswap_enabled = false;
}
- pr_info("loaded using pool %s/%s\n", pool->tfm_name,
- zpool_get_type(pool->zpool));
-
- list_add(&pool->list, &zswap_pools);
frontswap_register_ops(&zswap_frontswap_ops);
if (zswap_debugfs_init())
pr_warn("debugfs initialization failed\n");
return 0;
-pool_fail:
- zswap_cpu_dstmem_destroy();
+hp_fail:
+ cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
zswap_entry_cache_destroy();
cache_fail:
+ /* if built-in, we aren't unloaded on failure; don't allow use */
+ zswap_init_failed = true;
+ zswap_enabled = false;
return -ENOMEM;
}
/* must be late so crypto has time to come up */
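
For reference, below is a minimal standalone sketch (not part of the patch) of the multi-instance CPU hotplug pattern the diff adopts for zswap pools: embed an hlist_node in the per-pool structure, register prepare/dead callbacks once with cpuhp_setup_state_multi(), then add one instance per pool with cpuhp_state_add_instance(). The struct and function names ("example_pool", "example_init", etc.) are hypothetical; the cpuhp calls and the CPUHP_MM_ZSWP_POOL_PREPARE state are the ones the patch itself uses.

/* Illustrative sketch only -- mirrors the zswap_pool hotplug pattern above. */
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct example_pool {
	struct hlist_node node;	/* links this instance into the cpuhp state */
	/* per-instance resources (e.g. per-cpu transforms) would live here */
};

static int example_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct example_pool *pool = hlist_entry(node, struct example_pool, node);

	/* allocate this pool's per-cpu resources for @cpu; 0 or -errno */
	pr_debug("prepare pool %p for cpu %u\n", pool, cpu);
	return 0;
}

static int example_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct example_pool *pool = hlist_entry(node, struct example_pool, node);

	/* free this pool's per-cpu resources for @cpu */
	pr_debug("teardown pool %p for cpu %u\n", pool, cpu);
	return 0;
}

static int __init example_init(void)
{
	struct example_pool *pool;
	int ret;

	/* register the callbacks once for the multi-instance state */
	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      example_cpu_prepare,
				      example_cpu_dead);
	if (ret)
		return ret;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	/* runs example_cpu_prepare() on every online CPU for this instance */
	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	if (ret) {
		kfree(pool);
		return ret;
	}

	/* teardown path: cpuhp_state_remove_instance(), then kfree(pool) */
	return 0;
}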