path: root/fs/xfs/scrub/array.h
author	Darrick J. Wong <darrick.wong@oracle.com>	2019-01-16 10:12:05 -0800
committer	Darrick J. Wong <darrick.wong@oracle.com>	2019-02-04 09:31:13 -0800
commit	54eb14450a9d85e44f324264cfda39673ff685f8 (patch)
tree	e44dfb08a50213dc46c578f48ee1b61750280509 /fs/xfs/scrub/array.h
parent	bfae316eb34c7c0e5b223ee49ad01fe35f72d1f6 (diff)
xfs: convert big array and blob array to use memfd backend (repair-part-one_2019-02-04)
There are several problems with the initial implementations of the big array and the blob array data structures. First, using linked lists imposes a two-pointer overhead on every record stored. For blobs this isn't serious, but for fixed-size records this increases memory requirements by 40-60%. Second, we're using kernel memory to store the intermediate records. Kernel memory cannot be paged out, which means we run the risk of OOMing the machine when we run out of physical memory.

Therefore, replace the linked lists in both structures with memfd files. Random access becomes much easier, memory overhead drops to a negligible amount, and because memfd pages can be swapped, we have considerably more flexibility for memory use.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
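As a rough illustration of the idea described above (not code from this patch), a userspace analogue of a memfd-backed fixed-size-record array might look like the sketch below. The xfb_array names and error handling are hypothetical; only memfd_create(2), pread(2), and pwrite(2) are assumed.

	/* Minimal userspace sketch of a memfd-backed fixed-size-record array. */
	#define _GNU_SOURCE
	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/mman.h>
	#include <unistd.h>

	struct xfb_array {			/* hypothetical userspace analogue */
		int		fd;		/* memfd backing store */
		size_t		obj_size;	/* size of each fixed-size record */
		uint64_t	nr;		/* number of records stored so far */
	};

	static int xfb_array_init(struct xfb_array *a, size_t obj_size)
	{
		a->fd = memfd_create("xfb_array", 0);
		if (a->fd < 0)
			return -1;
		a->obj_size = obj_size;
		a->nr = 0;
		return 0;
	}

	/* Random access is just a byte offset computed from the record index. */
	static int xfb_array_set(struct xfb_array *a, uint64_t idx, const void *rec)
	{
		off_t	pos = (off_t)idx * a->obj_size;

		if (pwrite(a->fd, rec, a->obj_size, pos) != (ssize_t)a->obj_size)
			return -1;
		if (idx >= a->nr)
			a->nr = idx + 1;
		return 0;
	}

	static int xfb_array_get(struct xfb_array *a, uint64_t idx, void *rec)
	{
		off_t	pos = (off_t)idx * a->obj_size;

		if (pread(a->fd, rec, a->obj_size, pos) != (ssize_t)a->obj_size)
			return -1;
		return 0;
	}

This only shows why random access gets easier once each record lives at idx * obj_size in a regular, swappable tmpfs file; the kernel-side code in this series operates on a struct file rather than a file descriptor.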
Diffstat (limited to 'fs/xfs/scrub/array.h')
-rw-r--r--	fs/xfs/scrub/array.h	16
1 file changed, 3 insertions(+), 13 deletions(-)
diff --git a/fs/xfs/scrub/array.h b/fs/xfs/scrub/array.h
index a6a7b69dc138..b1b473bc56c5 100644
--- a/fs/xfs/scrub/array.h
+++ b/fs/xfs/scrub/array.h
@@ -6,20 +6,10 @@
 #ifndef __XFS_SCRUB_ARRAY_H__
 #define __XFS_SCRUB_ARRAY_H__
 
-struct xma_item;
-
-struct xma_cache {
-	uint64_t nr;
-	struct xa_item *item;
-};
-
-#define XMA_CACHE_SIZE (8)
-
 struct xfbma {
-	struct list_head list;
-	size_t obj_size;
-	uint64_t nr;
-	struct xma_cache cache[XMA_CACHE_SIZE];
+	struct file *filp;
+	size_t obj_size;
+	uint64_t nr;
 };
 
 struct xfbma *xfbma_init(size_t obj_size);
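For context, one way an accessor over the struct above could be written on the kernel side is sketched below. This is a hedged illustration, not one of the helpers added by this series; the xfbma_get_sketch and xfbma_offset names are made up here, and only kernel_read() from linux/fs.h is assumed.

	#include <linux/fs.h>		/* kernel_read() */
	#include <linux/errno.h>

	/*
	 * Hypothetical accessor sketch, not taken from this patch: look up a
	 * record by index with a direct read from the memfd-backed file.
	 */
	static inline loff_t xfbma_offset(const struct xfbma *array, uint64_t idx)
	{
		return (loff_t)idx * array->obj_size;
	}

	int xfbma_get_sketch(struct xfbma *array, uint64_t idx, void *ptr)
	{
		loff_t	pos = xfbma_offset(array, idx);
		ssize_t	ret;

		ret = kernel_read(array->filp, ptr, array->obj_size, &pos);
		if (ret < 0)
			return ret;
		if (ret != (ssize_t)array->obj_size)
			return -ENODATA;
		return 0;
	}

Because the backing store is an ordinary shmem file, reads and writes like this can fault pages back in from swap, which is what gives the repair code its extra flexibility under memory pressure.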