From 1431a5d2cfa18d7006d9b0e7ab4548d9bb19ce55 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 6 Dec 2018 17:11:32 -0800
Subject: locking/lockdep: Declare local symbols static

This patch prevents sparse from complaining about a missing declaration
for the lock_classes array when building with CONFIG_DEBUG_LOCKDEP=n.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20181207011148.251812-9-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1efada2dd9dd..7434a00b2b2f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -138,6 +138,9 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
  * get freed - this significantly simplifies the debugging code.
  */
 unsigned long nr_lock_classes;
+#ifndef CONFIG_DEBUG_LOCKDEP
+static
+#endif
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

 static inline struct lock_class *hlock_class(struct held_lock *hlock)
--
cgit v1.2.3
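
The #ifndef/static trick above generalizes to any symbol that must be
file-local in one configuration and externally visible in another. A
minimal standalone sketch of the pattern follows; CONFIG_DEBUG_EXAMPLE,
struct item, and MAX_ITEMS are hypothetical stand-ins for the lockdep
names, not real kernel symbols:

/* Stand-ins for CONFIG_DEBUG_LOCKDEP / struct lock_class / MAX_LOCKDEP_KEYS. */
#define MAX_ITEMS 16

struct item {
        int id;
};

#ifndef CONFIG_DEBUG_EXAMPLE
static  /* debug off: internal linkage, so no header declaration is expected */
#endif
struct item items[MAX_ITEMS];  /* debug on: external linkage, shared with debug code */

With the config option off, the array gets internal linkage and sparse
no longer looks for a declaration in a header; with it on, the symbol
stays visible to the out-of-file debug code.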
From d35568bdb6ce4be3f885f8f189bbde5adc7e0160 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 6 Dec 2018 17:11:33 -0800
Subject: locking/lockdep: Inline __lockdep_init_map()

Since the function __lockdep_init_map() only has one caller, inline it
into its caller. This patch does not change any functionality.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20181207011148.251812-10-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7434a00b2b2f..b5c8fcb6c070 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3091,7 +3091,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 /*
  * Initialize a lock instance's lock-class mapping info:
  */
-static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
+void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
        int i;
@@ -3147,12 +3147,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
                raw_local_irq_restore(flags);
        }
 }
-
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
-                     struct lock_class_key *key, int subclass)
-{
-       __lockdep_init_map(lock, name, key, subclass);
-}
 EXPORT_SYMBOL_GPL(lockdep_init_map);

 struct lock_class_key __lockdep_no_validate__;
--
cgit v1.2.3

From 2904d9fa45d3ce7153f1e10d78c570ecf7f19c35 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 6 Dec 2018 17:11:34 -0800
Subject: locking/lockdep: Introduce lock_class_cache_is_registered()

This patch does not change any functionality but makes the
lockdep_reset_lock() function easier to read.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20181207011148.251812-11-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 50 +++++++++++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 20 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b5c8fcb6c070..81388d028ac7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4201,13 +4201,33 @@ void lockdep_free_key_range(void *start, unsigned long size)
         */
 }

-void lockdep_reset_lock(struct lockdep_map *lock)
+/*
+ * Check whether any element of the @lock->class_cache[] array refers to a
+ * registered lock class. The caller must hold either the graph lock or the
+ * RCU read lock.
+ */
+static bool lock_class_cache_is_registered(struct lockdep_map *lock)
 {
        struct lock_class *class;
        struct hlist_head *head;
-       unsigned long flags;
        int i, j;
-       int locked;
+
+       for (i = 0; i < CLASSHASH_SIZE; i++) {
+               head = classhash_table + i;
+               hlist_for_each_entry_rcu(class, head, hash_entry) {
+                       for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+                               if (lock->class_cache[j] == class)
+                                       return true;
+               }
+       }
+       return false;
+}
+
+void lockdep_reset_lock(struct lockdep_map *lock)
+{
+       struct lock_class *class;
+       unsigned long flags;
+       int j, locked;

        raw_local_irq_save(flags);

@@ -4227,24 +4247,14 @@ void lockdep_reset_lock(struct lockdep_map *lock)
         * be gone.
         */
        locked = graph_lock();
-       for (i = 0; i < CLASSHASH_SIZE; i++) {
-               head = classhash_table + i;
-               hlist_for_each_entry_rcu(class, head, hash_entry) {
-                       int match = 0;
-
-                       for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
-                               match |= class == lock->class_cache[j];
-
-                       if (unlikely(match)) {
-                               if (debug_locks_off_graph_unlock()) {
-                                       /*
-                                        * We all just reset everything, how did it match?
-                                        */
-                                       WARN_ON(1);
-                               }
-                               goto out_restore;
-                       }
+       if (unlikely(lock_class_cache_is_registered(lock))) {
+               if (debug_locks_off_graph_unlock()) {
+                       /*
+                        * We all just reset everything, how did it match?
+                        */
+                       WARN_ON(1);
                }
+               goto out_restore;
        }
        if (locked)
                graph_unlock();
--
cgit v1.2.3
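
The refactoring above is the classic "extract a predicate" move: the
nested loops become a bool helper that returns early, and the caller
collapses to a single branch. A standalone sketch of the same shape,
with hypothetical names (table, cache_is_registered) in place of
lockdep's real hash tables:

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE  4   /* stand-in for CLASSHASH_SIZE */
#define CACHE_SLOTS 2   /* stand-in for NR_LOCKDEP_CACHING_CLASSES */

static int table[TABLE_SIZE] = { 10, 20, 30, 40 };

struct map {
        int cache[CACHE_SLOTS];
};

/* Check whether any cache slot refers to a registered table entry. */
static bool cache_is_registered(const struct map *m)
{
        for (int i = 0; i < TABLE_SIZE; i++)
                for (int j = 0; j < CACHE_SLOTS; j++)
                        if (m->cache[j] == table[i])
                                return true;  /* early return replaces the match flag */
        return false;
}

int main(void)
{
        struct map m = { .cache = { 5, 30 } };

        if (cache_is_registered(&m))  /* the caller is now a single branch */
                printf("cache entry still registered\n");
        return 0;
}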
From a66b6922dc6a5ece60ea9326153da3b062977a4d Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 6 Dec 2018 17:11:35 -0800
Subject: locking/lockdep: Remove a superfluous INIT_LIST_HEAD() statement

Initializing a list entry just before it is passed to list_add_tail_rcu()
is not necessary because list_add_tail_rcu() overwrites the next and prev
pointers anyway. Hence remove the INIT_LIST_HEAD() statement.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20181207011148.251812-12-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 81388d028ac7..346b5a1fd062 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -792,7 +792,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        class->key = key;
        class->name = lock->name;
        class->subclass = subclass;
-       INIT_LIST_HEAD(&class->lock_entry);
        INIT_LIST_HEAD(&class->locks_before);
        INIT_LIST_HEAD(&class->locks_after);
        class->name_version = count_matching_names(class);
--
cgit v1.2.3
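
The justification above -- a tail insert overwrites the entry's next and
prev pointers anyway -- is easy to verify in miniature. The sketch below
reimplements a doubly-linked tail insert in the spirit of
include/linux/list.h (names are hypothetical; this is not the kernel's
actual implementation) and links an entry whose pointers were never
initialized:

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        /* Both pointers of 'new' are unconditionally overwritten here... */
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head entry;  /* deliberately *not* initialized */

        list_add_tail(&entry, &head);  /* ...so this is still well-defined */
        printf("entry linked: %d\n", head.next == &entry && head.prev == &entry);
        return 0;
}

Any INIT_LIST_HEAD() on 'entry' before the insert would be dead work,
which is exactly why the patch drops it.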
From 786fa29e9cb6810e21ab0d9c41a81d81d54d1d1b Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 6 Dec 2018 17:11:36 -0800
Subject: locking/lockdep: Make concurrent lockdep_reset_lock() calls safe

Since zap_class() removes items from the all_lock_classes list and the
classhash_table, protect all zap_class() calls against concurrent data
structure modifications with the graph lock.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20181207011148.251812-13-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 346b5a1fd062..737d2dd3ea56 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4122,6 +4122,9 @@ void lockdep_reset(void)
        raw_local_irq_restore(flags);
 }

+/*
+ * Remove all references to a lock class. The caller must hold the graph lock.
+ */
 static void zap_class(struct lock_class *class)
 {
        int i;
@@ -4229,6 +4232,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
        int j, locked;

        raw_local_irq_save(flags);
+       locked = graph_lock();

        /*
         * Remove all classes this lock might have:
@@ -4245,7 +4249,6 @@ void lockdep_reset_lock(struct lockdep_map *lock)
         * Debug check: in the end all mapped classes should
         * be gone.
         */
-       locked = graph_lock();
        if (unlikely(lock_class_cache_is_registered(lock))) {
                if (debug_locks_off_graph_unlock()) {
                        /*
--
cgit v1.2.3

From fe27b0de8dfcdf8482558ce5d25e697fe74d851e Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 6 Dec 2018 17:11:37 -0800
Subject: locking/lockdep: Stop using RCU primitives to access 'all_lock_classes'

Due to the previous patch, all code that accesses the 'all_lock_classes'
list holds the graph lock. Hence use regular list primitives instead of
their RCU variants to access this list.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20181207011148.251812-14-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 737d2dd3ea56..5c837a537273 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -629,7 +629,8 @@ static int static_obj(void *obj)

 /*
  * To make lock name printouts unique, we calculate a unique
- * class->name_version generation counter:
+ * class->name_version generation counter. The caller must hold the graph
+ * lock.
  */
 static int count_matching_names(struct lock_class *new_class)
 {
@@ -639,7 +640,7 @@ static int count_matching_names(struct lock_class *new_class)
        if (!new_class->name)
                return 0;

-       list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
+       list_for_each_entry(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
@@ -803,7 +804,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        /*
         * Add it to the global list of classes:
         */
-       list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
+       list_add_tail(&class->lock_entry, &all_lock_classes);

        if (verbose(class)) {
                graph_unlock();
@@ -4141,7 +4142,7 @@ static void zap_class(struct lock_class *class)
         * Unhash the class and remove it from the all_lock_classes list:
         */
        hlist_del_rcu(&class->hash_entry);
-       list_del_rcu(&class->lock_entry);
+       list_del(&class->lock_entry);

        RCU_INIT_POINTER(class->key, NULL);
        RCU_INIT_POINTER(class->name, NULL);
--
cgit v1.2.3
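
The rule behind the patch above: once every reader and writer of a list
holds the same lock, the RCU list accessors buy nothing, and the plain
primitives document the real locking requirement. A user-space analogy,
with a pthread mutex standing in for the graph lock and an array standing
in for all_lock_classes (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static int all_classes[8];
static int nr_classes;

/* Caller must hold graph_lock -- the analogue of lockdep's graph lock. */
static int count_matching(int key)
{
        int n = 0;

        /* Plain traversal: no lock-free machinery needed under the lock. */
        for (int i = 0; i < nr_classes; i++)
                if (all_classes[i] == key)
                        n++;
        return n;
}

int main(void)
{
        pthread_mutex_lock(&graph_lock);
        all_classes[nr_classes++] = 42;
        printf("matches: %d\n", count_matching(42));
        pthread_mutex_unlock(&graph_lock);
        return 0;
}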
From 80eb865768703c0f85a0603762742ae1dedf21f0 Mon Sep 17 00:00:00 2001
From: Andrea Parri
Date: Tue, 27 Nov 2018 12:01:10 +0100
Subject: sched/fair: Clean up comment in nohz_idle_balance()

Concerning the comment associated with the atomic_fetch_andnot() in
nohz_idle_balance(), Vincent explains [1]:

  "[...] the comment is useless and can be removed [...] it was
   referring to a line code above the comment that was present in a
   previous iteration of the patchset. This line disappeared in final
   version but the comment has stayed."

So remove the comment.

Vincent also points out that the full ordering associated with the
atomic_fetch_andnot() primitive could be relaxed, but this patch insists
on the current more conservative/fully ordered solution: "Performance"
isn't a concern, stay away from "correctness"/subtle relaxed (re)ordering
if possible..., just make sure not to confuse the next reader with
misleading/out-of-date comments.

[1] http://lkml.kernel.org/r/CAKfTPtBjA-oCBRkO6__npQwL3+HLjzk7riCcPU1R7YdO-EpuZg@mail.gmail.com

Suggested-by: Vincent Guittot
Signed-off-by: Andrea Parri
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: https://lkml.kernel.org/r/20181127110110.5533-1-andrea.parri@amarulasolutions.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ac855b2f4774..db514993565b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9533,9 +9533,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
                return false;
        }

-       /*
-        * barrier, pairs with nohz_balance_enter_idle(), ensures ...
-        */
+       /* could be _relaxed() */
        flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
        if (!(flags & NOHZ_KICK_MASK))
                return false;
--
cgit v1.2.3
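
The ordering trade-off discussed above can be made concrete with C11
atomics: a plain atomic_fetch_and() is sequentially consistent, roughly
the analogue of the kernel's fully ordered atomic_fetch_andnot(), while
the _explicit form with memory_order_relaxed corresponds to the
_relaxed() variant the new comment alludes to. A sketch with stand-in
names (KICK_MASK, nohz_flags are hypothetical, not the scheduler's):

#include <stdatomic.h>
#include <stdio.h>

#define KICK_MASK 0x3  /* stand-in for NOHZ_KICK_MASK */

static atomic_uint nohz_flags = KICK_MASK;

int main(void)
{
        /* Fully ordered read-modify-write (the conservative current choice). */
        unsigned int flags = atomic_fetch_and(&nohz_flags, ~KICK_MASK);

        /*
         * Relaxed alternative: returns the same value but gives no ordering
         * guarantee against surrounding accesses:
         *
         *   flags = atomic_fetch_and_explicit(&nohz_flags, ~KICK_MASK,
         *                                     memory_order_relaxed);
         */

        if (!(flags & KICK_MASK))
                printf("no kick pending\n");
        else
                printf("kick flags cleared: 0x%x\n", flags & KICK_MASK);
        return 0;
}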