path: root/fs/gfs2/glock.c
Author:    Andreas Gruenbacher <agruenba@redhat.com>  2022-12-09 18:28:20 +0100
Committer: Andreas Gruenbacher <agruenba@redhat.com>  2023-01-31 22:40:24 +0100
Commit:    3056dc46559bfe3fc4b79771dcbc2d003f9fd313
Tree:      7a34174aadbcdf5bc3e65e975b45b6a349be448c /fs/gfs2/glock.c
Parent:    228804a35caa7edae4a81049281e7f106dea1ad1
gfs2: Get rid of GLF_PENDING_DELETE flag
Get rid of the GLF_PENDING_DELETE glock flag introduced by commit
a0e3cc65fa29 ("gfs2: Turn gl_delete into a delayed work").  The only
use of that flag is to prevent the iopen glock from being demoted
(i.e., unlocked) while delete work is pending.  It turns out that
demoting the iopen glock while delete work is pending is perfectly
fine; we only need to make sure that the glock isn't being freed while
still in use.  This is ensured by the previous patch because
delete_work_func() owns a reference while the work is queued or
running.

With these changes, gfs2_queue_delete_work() no longer takes the glock
spin lock, so we can use it in iopen_go_callback() instead of
open-coding it there.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
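[Editor's note] The key invariant here, established by the previous patch
in the series, is that whoever queues the delete work also takes a glock
reference, which the queued work owns until it runs or is canceled.  A
minimal sketch of that pattern follows; gfs2_glock_hold(), gfs2_glock_put()
and gfs2_queue_delete_work() are the real gfs2 helpers, but the wrapper
function and its name are illustrative only, not part of this patch:

	/*
	 * Illustrative only: take a reference before queueing so the
	 * glock cannot be freed while delete work is queued or running;
	 * drop the reference again if the work was already queued.
	 */
	static void queue_delete_sketch(struct gfs2_glock *gl, unsigned long delay)
	{
		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, delay))
			gfs2_glock_put(gl);
	}

Because the reference alone guarantees the glock's lifetime, demoting the
iopen glock while the work is pending is harmless, and the flag can go.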
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c | 26 +++-----------------------
1 file changed, 3 insertions(+), 23 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index fbfbf7a2feac..8d55616488aa 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -984,10 +984,6 @@ static void delete_work_func(struct work_struct *work)
 	struct inode *inode;
 	u64 no_addr = gl->gl_name.ln_number;
 
-	spin_lock(&gl->gl_lockref.lock);
-	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-	spin_unlock(&gl->gl_lockref.lock);
-
 	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
 		/*
 		 * If we can evict the inode, give the remote node trying to
@@ -2064,28 +2060,14 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 
 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
 {
-	bool queued;
-
-	spin_lock(&gl->gl_lockref.lock);
-	queued = queue_delayed_work(gfs2_delete_workqueue,
-				    &gl->gl_delete, delay);
-	if (queued)
-		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-	spin_unlock(&gl->gl_lockref.lock);
-	return queued;
+	return queue_delayed_work(gfs2_delete_workqueue,
+				  &gl->gl_delete, delay);
 }
 
 void gfs2_cancel_delete_work(struct gfs2_glock *gl)
 {
-	if (cancel_delayed_work(&gl->gl_delete)) {
-		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+	if (cancel_delayed_work(&gl->gl_delete))
 		gfs2_glock_put(gl);
-	}
-}
-
-bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
-{
-	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
 }
 
 static void flush_delete_work(struct gfs2_glock *gl)
@@ -2307,8 +2289,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 		*p++ = 'o';
 	if (test_bit(GLF_BLOCKING, gflags))
 		*p++ = 'b';
-	if (test_bit(GLF_PENDING_DELETE, gflags))
-		*p++ = 'P';
 	if (test_bit(GLF_FREEING, gflags))
 		*p++ = 'x';
 	if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
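
[Editor's note] The commit message also says gfs2_queue_delete_work() can
now be used in iopen_go_callback() instead of open-coding the queueing
there.  That hunk lives in fs/gfs2/glops.c and is not part of this diff
(which is limited to fs/gfs2/glock.c), so the following is a hypothetical
sketch only; the function name and the condition shown are assumptions,
not taken from the patch:

	/*
	 * Hypothetical: with the spin lock gone from
	 * gfs2_queue_delete_work(), a remote-demote callback can call it
	 * directly instead of open-coding the queueing under
	 * gl->gl_lockref.lock.
	 */
	static void iopen_callback_sketch(struct gfs2_glock *gl, bool remote)
	{
		if (remote && gl->gl_state == LM_ST_SHARED)
			gfs2_queue_delete_work(gl, 0);
	}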