Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/core.c                                                     24
-rw-r--r--  mm/damon/dbgfs.c                                                     2
-rw-r--r--  mm/damon/sysfs.c                                                     2
-rw-r--r--  mm/damon/tests/.kunitconfig                                         22
-rw-r--r--  mm/damon/tests/core-kunit.h  (renamed from mm/damon/core-test.h)    35
-rw-r--r--  mm/damon/tests/dbgfs-kunit.h (renamed from mm/damon/dbgfs-test.h)   10
-rw-r--r--  mm/damon/tests/sysfs-kunit.h (renamed from mm/damon/sysfs-test.h)    0
-rw-r--r--  mm/damon/tests/vaddr-kunit.h (renamed from mm/damon/vaddr-test.h)    2
-rw-r--r--  mm/damon/vaddr.c                                                     4
9 files changed, 86 insertions(+), 15 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 7a87628b76ab..a83f3b736d51 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -552,7 +552,13 @@ static unsigned int damon_accesses_bp_to_nr_accesses(
return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}
-/* convert nr_accesses to access ratio in bp (per 10,000) */
+/*
+ * Convert nr_accesses to access ratio in bp (per 10,000).
+ *
+ * Callers should ensure attrs.aggr_interval is not zero, like
+ * damon_update_monitoring_results() does. Otherwise, divide-by-zero would
+ * happen.
+ */
static unsigned int damon_nr_accesses_to_accesses_bp(
unsigned int nr_accesses, struct damon_attrs *attrs)
{
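For context, a simplified, out-of-tree sketch of the conversion and of how a zero aggr_interval turns into a divide-by-zero; the helper names and exact expressions below are illustrative, not the in-tree definitions:

/* Illustrative sketch only; not the kernel's helpers. */
static unsigned int max_nr_accesses_sketch(const struct damon_attrs *attrs)
{
	/* evaluates to 0 whenever aggr_interval is 0 */
	return attrs->aggr_interval / attrs->sample_interval;
}

static unsigned int nr_accesses_to_bp_sketch(unsigned int nr_accesses,
		const struct damon_attrs *attrs)
{
	/* divides by zero unless the caller ensured aggr_interval != 0 */
	return nr_accesses * 10000 / max_nr_accesses_sketch(attrs);
}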
@@ -1582,13 +1588,16 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
return;
/* Fill up the score histogram */
- memset(quota->histogram, 0, sizeof(quota->histogram));
+ memset(c->regions_score_histogram, 0,
+ sizeof(*c->regions_score_histogram) *
+ (DAMOS_MAX_SCORE + 1));
damon_for_each_target(t, c) {
damon_for_each_region(r, t) {
if (!__damos_valid_target(r, s))
continue;
score = c->ops.get_scheme_score(c, t, r, s);
- quota->histogram[score] += damon_sz_region(r);
+ c->regions_score_histogram[score] +=
+ damon_sz_region(r);
if (score > max_score)
max_score = score;
}
@@ -1596,7 +1605,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
/* Set the min score limit */
for (cumulated_sz = 0, score = max_score; ; score--) {
- cumulated_sz += quota->histogram[score];
+ cumulated_sz += c->regions_score_histogram[score];
if (cumulated_sz >= quota->esz || !score)
break;
}
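As a concrete illustration of the min score scan above: if quota->esz were 100 MiB and the histogram held 60 MiB of regions at score 4 and 50 MiB at score 3, the downward walk would stop at score 3 (60 MiB + 50 MiB >= 100 MiB), and that score becomes the minimum score limit mentioned in the comment, so only regions scoring 3 or higher are operated on under the quota.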
@@ -1957,6 +1966,10 @@ static int kdamond_fn(void *data)
ctx->ops.init(ctx);
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
goto done;
+ ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
+ sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
+ if (!ctx->regions_score_histogram)
+ goto done;
sz_limit = damon_region_sz_limit(ctx);
@@ -2034,6 +2047,7 @@ done:
ctx->callback.before_terminate(ctx);
if (ctx->ops.cleanup)
ctx->ops.cleanup(ctx);
+ kfree(ctx->regions_score_histogram);
pr_debug("kdamond (%d) finishes\n", current->pid);
mutex_lock(&ctx->kdamond_lock);
@@ -2205,4 +2219,4 @@ static int __init damon_init(void)
subsys_initcall(damon_init);
-#include "core-test.h"
+#include "tests/core-kunit.h"
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 51a6f1cac385..b4213bc47e44 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -1145,4 +1145,4 @@ out:
module_init(damon_dbgfs_init);
-#include "dbgfs-test.h"
+#include "tests/dbgfs-kunit.h"
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index cffc755e7775..58145d59881d 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1882,4 +1882,4 @@ out:
}
subsys_initcall(damon_sysfs_init);
-#include "sysfs-test.h"
+#include "tests/sysfs-kunit.h"
diff --git a/mm/damon/tests/.kunitconfig b/mm/damon/tests/.kunitconfig
new file mode 100644
index 000000000000..a73be044fc9b
--- /dev/null
+++ b/mm/damon/tests/.kunitconfig
@@ -0,0 +1,22 @@
+# for DAMON core
+CONFIG_KUNIT=y
+CONFIG_DAMON=y
+CONFIG_DAMON_KUNIT_TEST=y
+
+# for DAMON vaddr ops
+CONFIG_MMU=y
+CONFIG_PAGE_IDLE_FLAG=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_VADDR_KUNIT_TEST=y
+
+# for DAMON sysfs interface
+CONFIG_SYSFS=y
+CONFIG_DAMON_SYSFS=y
+CONFIG_DAMON_SYSFS_KUNIT_TEST=y
+
+# for DAMON debugfs interface
+CONFIG_DEBUG_FS=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DAMON_DBGFS_DEPRECATED=y
+CONFIG_DAMON_DBGFS=y
+CONFIG_DAMON_DBGFS_KUNIT_TEST=y
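With this file in place, the whole DAMON KUnit suite can be run through kunit_tool, for example by running ./tools/testing/kunit/kunit.py run --kunitconfig=mm/damon/tests/.kunitconfig from the kernel source root (assuming the usual kunit.py workflow).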
diff --git a/mm/damon/core-test.h b/mm/damon/tests/core-kunit.h
index 0cee634f3544..cf22e09a3507 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/tests/core-kunit.h
@@ -246,16 +246,20 @@ static void damon_test_split_regions_of(struct kunit *test)
static void damon_test_ops_registration(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
- struct damon_operations ops, bak;
+ struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
+ bool need_cleanup = false;
+
+ /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
+ if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
+ bak.id = DAMON_OPS_VADDR;
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
+ need_cleanup = true;
+ }
- /* DAMON_OPS_{V,P}ADDR are registered on subsys_initcall */
+ /* DAMON_OPS_VADDR is ensured to be registered */
KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
- KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0);
/* Double-registration is prohibited */
- ops.id = DAMON_OPS_VADDR;
- KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
- ops.id = DAMON_OPS_PADDR;
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
/* Unknown ops id cannot be registered */
@@ -278,6 +282,13 @@ static void damon_test_ops_registration(struct kunit *test)
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
damon_destroy_ctx(c);
+
+ if (need_cleanup) {
+ mutex_lock(&damon_ops_lock);
+ damon_registered_ops[DAMON_OPS_VADDR] =
+ (struct damon_operations){};
+ mutex_unlock(&damon_ops_lock);
+ }
}
static void damon_test_set_regions(struct kunit *test)
@@ -309,6 +320,18 @@ static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
.aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
};
+ /*
+ * In some cases such as 32bit architectures where UINT_MAX is
+ * ULONG_MAX, attrs.aggr_interval becomes zero. Calling
+ * damon_nr_accesses_to_accesses_bp() in the case will cause
+ * divide-by-zero. Such case is prohibited in normal execution since
+ * the caution is documented on the comment for the function, and
+ * damon_update_monitoring_results() does the check. Skip the test in
+ * the case.
+ */
+ if (!attrs.aggr_interval)
+ kunit_skip(test, "aggr_interval is zero.");
+
KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
}
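The wrap-around the comment refers to can be reproduced in a small userspace sketch (not kernel code): with a 32-bit unsigned long, UINT_MAX equals ULONG_MAX, so the initializer below collapses to zero.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* On ILP32 targets unsigned long is 32 bits wide, so this wraps to 0. */
	unsigned long aggr_interval = ((unsigned long)UINT_MAX + 1) * 10;

	/* Prints 0 on 32-bit builds, 42949672960 on LP64 builds. */
	printf("%lu\n", aggr_interval);
	return 0;
}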
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/tests/dbgfs-kunit.h
index 2d85217f5ba4..d2ecfcc8db86 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/tests/dbgfs-kunit.h
@@ -73,6 +73,11 @@ static void damon_dbgfs_test_set_targets(struct kunit *test)
struct damon_ctx *ctx = dbgfs_new_ctx();
char buf[64];
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ dbgfs_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
/* Make DAMON consider target has no pid */
damon_select_ops(ctx, DAMON_OPS_PADDR);
@@ -111,6 +116,11 @@ static void damon_dbgfs_test_set_init_regions(struct kunit *test)
int i, rc;
char buf[256];
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
damon_select_ops(ctx, DAMON_OPS_PADDR);
dbgfs_set_targets(ctx, 3, NULL);
diff --git a/mm/damon/sysfs-test.h b/mm/damon/tests/sysfs-kunit.h
index 1c9b596057a7..1c9b596057a7 100644
--- a/mm/damon/sysfs-test.h
+++ b/mm/damon/tests/sysfs-kunit.h
diff --git a/mm/damon/vaddr-test.h b/mm/damon/tests/vaddr-kunit.h
index 83626483f82b..a339d117150f 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/tests/vaddr-kunit.h
@@ -77,7 +77,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
(struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
};
- mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
+ mt_init_flags(&mm.mm_mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)))
kunit_skip(test, "Failed to create VMA tree");
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 58829baf8b5d..08cfd22b5249 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
* If this is too slow, it can be optimised to examine the maple
* tree gaps.
*/
+ rcu_read_lock();
for_each_vma(vmi, vma) {
unsigned long gap;
@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm,
next:
prev = vma;
}
+ rcu_read_unlock();
if (!sz_range(&second_gap) || !sz_range(&first_gap))
return -EINVAL;
@@ -730,4 +732,4 @@ static int __init damon_va_initcall(void)
subsys_initcall(damon_va_initcall);
-#include "vaddr-test.h"
+#include "tests/vaddr-kunit.h"