path: root/include
author		Kent Overstreet <kent.overstreet@gmail.com>	2016-03-28 19:38:20 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2018-12-27 11:15:25 -0500
commit		cd2e6731424027c894677120e5b2096ab2cc60d6 (patch)
tree		c47fc72cdc3fc04cb10cd765c70fe39a7d934e55 /include
parent		e5a743723a496a0f548eb818762d9e1a7f973473 (diff)
mm: pagecache add lock
Add a per address space lock around adding pages to the pagecache - making it possible for fallocate INSERT_RANGE/COLLAPSE_RANGE to work correctly, and also hopefully making truncate and dio a bit saner.
Diffstat (limited to 'include')
-rw-r--r--	include/linux/fs.h	24
-rw-r--r--	include/linux/sched.h	4
2 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 897eae8faee1..439818420857 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -403,6 +403,28 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
+/*
+ * Two-state lock - can be taken for add or block - both states are shared,
+ * like read side of rwsem, but conflict with other state:
+ */
+struct pagecache_lock {
+ atomic_long_t v;
+ wait_queue_head_t wait;
+};
+
+static inline void pagecache_lock_init(struct pagecache_lock *lock)
+{
+ atomic_long_set(&lock->v, 0);
+ init_waitqueue_head(&lock->wait);
+}
+
+void pagecache_add_put(struct pagecache_lock *);
+void pagecache_add_get(struct pagecache_lock *);
+void __pagecache_block_put(struct pagecache_lock *);
+void __pagecache_block_get(struct pagecache_lock *);
+void pagecache_block_put(struct pagecache_lock *);
+void pagecache_block_get(struct pagecache_lock *);
+
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root i_pages; /* cached pages */
@@ -421,6 +443,8 @@ struct address_space {
struct list_head private_list; /* for use by the address_space */
void *private_data; /* ditto */
errseq_t wb_err;
+ struct pagecache_lock add_lock
+ ____cacheline_aligned_in_smp; /* protects adding new pages */
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
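
The bodies of these helpers live in mm/filemap.c, which is outside this include-only diffstat. As an illustration only, here is a minimal sketch of how the two-state semantics described in the comment above could be built on the atomic counter plus waitqueue, with the sign of lock->v distinguishing "add" holders (positive) from "block" holders (negative); the function bodies below are an assumption, not the patch itself:

	/* sketch only; assumes <linux/atomic.h> and <linux/wait.h>, as the struct above does */

	static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
	{
		/* last holder of either state wakes waiters for the other state */
		if (atomic_long_sub_return_release(i, &lock->v) == 0)
			wake_up_all(&lock->wait);
	}

	static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
	{
		long v = atomic_long_read(&lock->v), old;

		do {
			old = v;
			/* positive count: "add" held; negative count: "block" held */
			if (i > 0 ? v < 0 : v > 0)
				return false;
		} while ((v = atomic_long_cmpxchg_acquire(&lock->v, old, old + i)) != old);

		return true;
	}

	static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
	{
		wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
	}

	void pagecache_add_get(struct pagecache_lock *lock)
	{
		__pagecache_lock_get(lock, 1);		/* shared with other adders */
	}

	void pagecache_add_put(struct pagecache_lock *lock)
	{
		__pagecache_lock_put(lock, 1);
	}

	void __pagecache_block_get(struct pagecache_lock *lock)
	{
		__pagecache_lock_get(lock, -1);		/* shared with other blockers */
	}

	void __pagecache_block_put(struct pagecache_lock *lock)
	{
		__pagecache_lock_put(lock, -1);
	}

In this reading, either side behaves like the read side of an rwsem with respect to its own state (any number of concurrent adders, or any number of concurrent blockers), while the two states exclude each other.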
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 977cb57d7bc9..3a5382a321bd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -41,6 +41,7 @@ struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
+struct pagecache_lock;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
@@ -940,6 +941,9 @@ struct task_struct {
unsigned int in_ubsan;
#endif
+ /* currently held lock, for avoiding recursing in fault path: */
+ struct pagecache_lock *pagecache_lock;
+
/* Journalling filesystem info: */
void *journal_info;
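
The new task_struct field pairs with the non-underscore block helpers declared in fs.h. A sketch of one plausible use, again an assumption rather than part of this diff: the block-side wrappers record the held lock in current->pagecache_lock, and the pagecache-add path checks it so a page fault taken while the block side is held does not try to take the add side of the same lock and deadlock against itself. The helper pagecache_add_get_maybe() below is hypothetical:

	void pagecache_block_get(struct pagecache_lock *lock)
	{
		__pagecache_block_get(lock);
		/* remember the held lock so the fault path can detect recursion */
		current->pagecache_lock = lock;
	}

	void pagecache_block_put(struct pagecache_lock *lock)
	{
		current->pagecache_lock = NULL;
		__pagecache_block_put(lock);
	}

	/* hypothetical helper in the pagecache-add path: */
	static void pagecache_add_get_maybe(struct address_space *mapping)
	{
		/* skip the add side if this task already holds the block side */
		if (current->pagecache_lock != &mapping->add_lock)
			pagecache_add_get(&mapping->add_lock);
	}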