author     Corey Minyard <cminyard@mvista.com>  2019-08-02 07:31:36 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-10-07 18:53:10 +0200
commit     7522f96fa4cbdc88a4ca762c025396e581a1087b (patch)
tree       b16d49e827d7e617339929f3784d02326fd51c54 /drivers/char/ipmi
parent     59a8932a1fb63abb00a9e2cb6781d031d45b7c99 (diff)
ipmi_si: Only schedule continuously in the thread in maintenance mode
[ Upstream commit 340ff31ab00bca5c15915e70ad9ada3030c98cf8 ]

ipmi_thread() uses back-to-back schedule() to poll for command
completion which, on some machines, can push up CPU consumption and
heavily tax the scheduler locks leading to noticeable overall
performance degradation.

This was originally added so firmware updates through IPMI would
complete in a timely manner.  But we can't kill the scheduler locks for
that one use case.

Instead, only run schedule() continuously in maintenance mode, where
firmware updates should run.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
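For readability, here is a condensed sketch of the polling decision
ipmi_thread() ends up with after this patch.  It is simplified from the
diff below: ipmi_thread_relax() is an illustrative name only, and the
SI_SM_IDLE branch is abbreviated, so treat it as a sketch rather than
the literal upstream code.

/*
 * Condensed sketch of ipmi_thread()'s post-patch polling decision.
 * Simplified from the diff below; ipmi_thread_relax() is an
 * illustrative name and the SI_SM_IDLE branch is abbreviated.
 */
static void ipmi_thread_relax(struct smi_info *smi_info,
			      enum si_sm_result smi_result, bool busy_wait)
{
	if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
		/* State machine wants another call right away: loop again. */
	} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
		if (smi_info->in_maintenance_mode)
			schedule();             /* firmware update: poll as fast as possible */
		else
			usleep_range(100, 200); /* normal operation: back off 100-200 us */
	} else if (smi_result == SI_SM_IDLE) {
		/* Nothing pending: sleep (see the second hunk for the details). */
		schedule_timeout_interruptible(100);
	} else {
		schedule_timeout_interruptible(1);
	}
}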
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--   drivers/char/ipmi/ipmi_si_intf.c   24
1 file changed, 19 insertions, 5 deletions
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e0a53156b782..82af65818444 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -283,6 +283,9 @@ struct smi_info {
*/
bool irq_enable_broken;
+ /* Is the driver in maintenance mode? */
+ bool in_maintenance_mode;
+
/*
* Did we get an attention that we did not handle?
*/
@@ -1093,11 +1096,20 @@ static int ipmi_thread(void *data)
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
&busy_until);
- if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+ if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
; /* do nothing */
- else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
- schedule();
- else if (smi_result == SI_SM_IDLE) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+ /*
+ * In maintenance mode we run as fast as
+ * possible to allow firmware updates to
+ * complete as fast as possible, but normally
+ * don't bang on the scheduler.
+ */
+ if (smi_info->in_maintenance_mode)
+ schedule();
+ else
+ usleep_range(100, 200);
+ } else if (smi_result == SI_SM_IDLE) {
if (atomic_read(&smi_info->need_watch)) {
schedule_timeout_interruptible(100);
} else {
@@ -1105,8 +1117,9 @@ static int ipmi_thread(void *data)
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
- } else
+ } else {
schedule_timeout_interruptible(1);
+ }
}
return 0;
}
@@ -1285,6 +1298,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
if (!enable)
atomic_set(&smi_info->req_events, 0);
+ smi_info->in_maintenance_mode = enable;
}
static const struct ipmi_smi_handlers handlers = {
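The handlers structure the listing cuts off above is the
struct ipmi_smi_handlers through which the IPMI message handler invokes
set_maintenance_mode() whenever maintenance mode is entered or left.  A
trimmed, illustrative excerpt (most members omitted; consult the full
ipmi_si_intf.c for the exact initializer):

static const struct ipmi_smi_handlers handlers = {
	.owner			= THIS_MODULE,
	.sender			= sender,
	.set_maintenance_mode	= set_maintenance_mode,
	/* ... remaining callbacks unchanged by this patch ... */
	.poll			= poll,
};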