From 1a229d8690a0f8951fc4aa8b76a7efab0d8de342 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 6 Nov 2023 12:06:54 +0100 Subject: Revert "usb: phy: add usb phy notify port status API" This reverts commit a08799cf17c22375752abfad3b4a2b34b3acb287. The recently added Realtek PHY drivers depend on the new port status notification mechanism which was built on the deprecated USB PHY implementation and devicetree binding. Specifically, using these PHYs would require describing the very same PHY using both the generic "phy" property and the deprecated "usb-phy" property which is clearly wrong. We should not be building new functionality on top of the legacy USB PHY implementation even if it is currently stuck in some kind of transitional limbo. Revert the new notification interface which is broken by design. Fixes: a08799cf17c2 ("usb: phy: add usb phy notify port status API") Cc: stable@vger.kernel.org # 6.6 Cc: Stanley Chang Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20231106110654.31090-4-johan+linaro@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/phy.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'include') diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index b513749582d7..e4de6bc1f69b 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -144,10 +144,6 @@ struct usb_phy { */ int (*set_wakeup)(struct usb_phy *x, bool enabled); - /* notify phy port status change */ - int (*notify_port_status)(struct usb_phy *x, int port, - u16 portstatus, u16 portchange); - /* notify phy connect status change */ int (*notify_connect)(struct usb_phy *x, enum usb_device_speed speed); @@ -320,15 +316,6 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled) return 0; } -static inline int -usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange) -{ - if (x && x->notify_port_status) - return x->notify_port_status(x, port, portstatus, portchange); - else - return 0; -} - static inline int usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed) { -- cgit v1.2.3 From 5d33213fac5929a2e7766c88d78779fd443b0fe8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 3 Nov 2023 10:39:24 +0300 Subject: media: v4l2-subdev: Fix a 64bit bug The problem is this line here from subdev_do_ioctl(). client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS; The "client_cap->capabilities" variable is a u64. The AND operation is supposed to clear out the V4L2_SUBDEV_CLIENT_CAP_STREAMS flag. But because the flag is defined as a 32-bit constant (1U << 0), the complement ~V4L2_SUBDEV_CLIENT_CAP_STREAMS is computed in 32 bits as 0xFFFFFFFE, which zero-extends to 0x00000000FFFFFFFE when promoted to u64, so the AND accidentally clears out the high 32 bits as well. Currently we only use the first bit and none of the upper bits so this doesn't affect runtime behavior. Fixes: f57fa2959244 ("media: v4l2-subdev: Add new ioctl for client capabilities") Signed-off-by: Dan Carpenter Reviewed-by: Tomi Valkeinen Signed-off-by: Hans Verkuil --- include/uapi/linux/v4l2-subdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index 4a195b68f28f..b383c2fe0cf3 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -239,7 +239,7 @@ struct v4l2_subdev_routing { * set (which is the default), the 'stream' fields will be forced to 0 by the * kernel. 
*/ - #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0) + #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0) /** * struct v4l2_subdev_client_capability - Capabilities of the client accessing -- cgit v1.2.3 From d04ce4113cb4e5c2deddcb161db42a25917f285f Mon Sep 17 00:00:00 2001 From: Stefan Binding Date: Fri, 17 Nov 2023 16:36:09 +0000 Subject: ALSA: cs35l41: Fix for old systems which do not support command Some older laptops using cs35l41 use firmware which does not support the CSPL_MBOX_CMD_SPK_OUT_ENABLE command. Firmware versions v0.28.0 and older do not support this command. Fixes: fa3efcc36aac ("ALSA: cs35l41: Use mbox command to enable speaker output for external boost") Signed-off-by: Stefan Binding Link: https://lore.kernel.org/r/20231117163609.823627-3-sbinding@opensource.cirrus.com Signed-off-by: Takashi Iwai --- include/sound/cs35l41.h | 2 +- sound/pci/hda/cs35l41_hda.c | 4 ++-- sound/soc/codecs/cs35l41-lib.c | 6 ++++-- sound/soc/codecs/cs35l41.c | 4 ++-- 4 files changed, 9 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h index 043f8ac65dbf..68e053fe7340 100644 --- a/include/sound/cs35l41.h +++ b/include/sound/cs35l41.h @@ -906,6 +906,6 @@ int cs35l41_init_boost(struct device *dev, struct regmap *regmap, bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type); int cs35l41_mdsync_up(struct regmap *regmap); int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type, - int enable, bool firmware_running); + int enable, struct cs_dsp *dsp); #endif /* __CS35L41_H */ diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c index b7d2aa18c566..cbd7d8badf91 100644 --- a/sound/pci/hda/cs35l41_hda.c +++ b/sound/pci/hda/cs35l41_hda.c @@ -598,7 +598,7 @@ static void cs35l41_hda_play_done(struct device *dev) dev_dbg(dev, "Play (Complete)\n"); cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1, - cs35l41->cs_dsp.running); + &cs35l41->cs_dsp); cs35l41_mute(dev, false); } @@ -611,7 +611,7 @@ static void cs35l41_hda_pause_start(struct device *dev) cs35l41_mute(dev, true); cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0, - cs35l41->cs_dsp.running); + &cs35l41->cs_dsp); } static void cs35l41_hda_pause_done(struct device *dev) diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c index 4569e4f7cf7e..e9993a39f7d0 100644 --- a/sound/soc/codecs/cs35l41-lib.c +++ b/sound/soc/codecs/cs35l41-lib.c @@ -16,6 +16,8 @@ #include +#define CS35L41_FIRMWARE_OLD_VERSION 0x001C00 /* v0.28.0 */ + static const struct reg_default cs35l41_reg[] = { { CS35L41_PWR_CTRL1, 0x00000000 }, { CS35L41_PWR_CTRL2, 0x00000000 }, @@ -1214,7 +1216,7 @@ EXPORT_SYMBOL_GPL(cs35l41_safe_reset); * the PLL Lock interrupt, in the IRQ handler. 
*/ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type, - int enable, bool firmware_running) + int enable, struct cs_dsp *dsp) { int ret; unsigned int gpio1_func, pad_control, pwr_ctrl1, pwr_ctrl3, int_status, pup_pdn_mask; @@ -1309,7 +1311,7 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4 } regmap_write(regmap, CS35L41_IRQ1_STATUS1, CS35L41_PUP_DONE_MASK); - if (firmware_running) + if (dsp->running && dsp->fw_id_version > CS35L41_FIRMWARE_OLD_VERSION) ret = cs35l41_set_cspl_mbox_cmd(dev, regmap, CSPL_MBOX_CMD_SPK_OUT_ENABLE); else diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index d0e9128ac6d0..dfb4ce53491b 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -519,11 +519,11 @@ static int cs35l41_main_amp_event(struct snd_soc_dapm_widget *w, ARRAY_SIZE(cs35l41_pup_patch)); ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type, - 1, cs35l41->dsp.cs_dsp.running); + 1, &cs35l41->dsp.cs_dsp); break; case SND_SOC_DAPM_POST_PMD: ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type, - 0, cs35l41->dsp.cs_dsp.running); + 0, &cs35l41->dsp.cs_dsp); regmap_multi_reg_write_bypassed(cs35l41->regmap, cs35l41_pdn_patch, -- cgit v1.2.3 From 37ba91a82e3b9de35f64348c62b5ec7d74e3a41c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 12 Nov 2023 21:36:26 +0100 Subject: ACPI: PM: Add acpi_device_fix_up_power_children() function In some cases it is necessary to fix-up the power-state of an ACPI device's children without touching the ACPI device itself. Add a new acpi_device_fix_up_power_children() function for this. Signed-off-by: Hans de Goede Cc: stable@vger.kernel.org # 6.6+ Signed-off-by: Rafael J. Wysocki --- drivers/acpi/device_pm.c | 13 +++++++++++++ include/acpi/acpi_bus.h | 1 + 2 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index f007116a8427..3b4d048c4941 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev) } EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended); +/** + * acpi_device_fix_up_power_children - Force a device's children into D0. + * @adev: Parent device object whose children's power state is to be fixed up. + * + * Call acpi_device_fix_up_power() for @adev's children so long as they + * are reported as present and enabled. 
+ */ +void acpi_device_fix_up_power_children(struct acpi_device *adev) +{ + acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL); +} +EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children); + int acpi_device_update_power(struct acpi_device *device, int *state_p) { int state; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index afeed6e72049..1216d72c650f 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -542,6 +542,7 @@ int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); int acpi_device_fix_up_power(struct acpi_device *device); void acpi_device_fix_up_power_extended(struct acpi_device *adev); +void acpi_device_fix_up_power_children(struct acpi_device *adev); int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); -- cgit v1.2.3 From 35732699f5d2922ff674e711e566cf44a4bd86d2 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 22 Nov 2023 08:33:53 -0700 Subject: ACPI: Fix ARM32 platforms compile issue introduced by fw_table changes Linus reported that: After commit a103f46633fd the kernel stopped compiling for several ARM32 platforms that I am building with a bare metal compiler. Bare metal compilers (arm-none-eabi-) don't define __linux__. This is because the ACPI environment header (acpi/platform/acenv.h) is now in the include path of generic, non-ACPI code: CC arch/arm/kernel/irq.o CC kernel/sysctl.o CC crypto/api.o In file included from ../include/acpi/acpi.h:22, from ../include/linux/fw_table.h:29, from ../include/linux/acpi.h:18, from ../include/linux/irqchip.h:14, from ../arch/arm/kernel/irq.c:25: ../include/acpi/platform/acenv.h:218:2: error: #error Unknown target environment 218 | #error Unknown target environment | ^~~~~ The issue was introduced by splitting out the common ACPI table-handling code to support the new generic fw_table code. Rafael suggested [1] moving the fw_table.h include in linux/acpi.h to below the linux/mutex.h include. Remove the two includes in fw_table.h. Replace the linux/fw_table.h include in fw_table.c with linux/acpi.h. Link: https://lore.kernel.org/linux-acpi/CAJZ5v0idWdJq3JSqQWLG5q+b+b=zkEdWR55rGYEoxh7R6N8kFQ@mail.gmail.com/ Fixes: a103f46633fd ("acpi: Move common tables helper functions to common lib") Closes: https://lore.kernel.org/linux-acpi/20231114-arm-build-bug-v1-1-458745fe32a4@linaro.org/ Reported-by: Linus Walleij Suggested-by: Rafael J. Wysocki Tested-by: Linus Walleij Signed-off-by: Dave Jiang Acked-by: Rafael J. Wysocki Signed-off-by: Rafael J. 
Wysocki --- include/linux/acpi.h | 22 +++++++++++----------- include/linux/fw_table.h | 3 --- lib/fw_table.c | 2 +- 3 files changed, 12 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 54189e0e5f41..4db54e928b36 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -15,7 +15,6 @@ #include #include #include -#include <linux/fw_table.h> struct irq_domain; struct irq_domain_ops; @@ -25,22 +24,13 @@ struct irq_domain_ops; #endif #include -#ifdef CONFIG_ACPI_TABLE_LIB -#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) -#define __init_or_acpilib -#define __initdata_or_acpilib -#else -#define EXPORT_SYMBOL_ACPI_LIB(x) -#define __init_or_acpilib __init -#define __initdata_or_acpilib __initdata -#endif - #ifdef CONFIG_ACPI #include #include #include #include +#include <linux/fw_table.h> #include #include @@ -48,6 +38,16 @@ struct irq_domain_ops; #include #include +#ifdef CONFIG_ACPI_TABLE_LIB +#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) +#define __init_or_acpilib +#define __initdata_or_acpilib +#else +#define EXPORT_SYMBOL_ACPI_LIB(x) +#define __init_or_acpilib __init +#define __initdata_or_acpilib __initdata +#endif + static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return adev ? adev->handle : NULL; } diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h index ff8fa58d5818..ca49947f0a77 100644 --- a/include/linux/fw_table.h +++ b/include/linux/fw_table.h @@ -25,9 +25,6 @@ struct acpi_subtable_proc { int count; }; -#include -#include - union acpi_subtable_headers { struct acpi_subtable_header common; struct acpi_hmat_structure hmat; diff --git a/lib/fw_table.c b/lib/fw_table.c index b51f30a28e47..294df54e33b6 100644 --- a/lib/fw_table.c +++ b/lib/fw_table.c @@ -7,7 +7,7 @@ * Copyright (C) 2023 Intel Corp. */ #include -#include <linux/fw_table.h> +#include <linux/acpi.h> #include #include #include -- cgit v1.2.3 From d4e3b928ab487a8aecd1f6a140b40ac365116cfb Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 17 Nov 2023 19:13:27 -0500 Subject: closures: CLOSURE_CALLBACK() to fix type punning Control flow integrity is now checking that type signatures match on indirect function calls. That breaks closures, which embed a work_struct in a closure in such a way that a closure_fn may also be used as a workqueue fn by the underlying closure code. So we have to change closure fns to take a work_struct as their argument - but that results in a loss of clarity, as closure fns have different semantics from normal workqueue functions (they run owning a ref on the closure, which must be released with continue_at() or closure_return()). Thus, this patch introduces CLOSURE_CALLBACK() and closure_type() macros as suggested by Kees, to smooth things over a bit. 
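For illustration, the conversion pattern looks like this (hypothetical struct my_io and callback, mirroring the bcache conversions below; not part of the patch itself):

    struct my_io {
            struct closure cl;
            /* ... */
    };

    /* Before: a closure_fn, which the closure code also invokes
     * through a work_struct function pointer - the type punning
     * that CFI now rejects. */
    static void my_io_done(struct closure *cl)
    {
            struct my_io *io = container_of(cl, struct my_io, cl);

            kfree(io);
    }

    /* After: the callback genuinely takes a struct work_struct *,
     * and closure_type() recovers both the closure and its
     * enclosing object. */
    static CLOSURE_CALLBACK(my_io_done)
    {
            closure_type(io, struct my_io, cl);

            kfree(io);
    }
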
Suggested-by: Kees Cook Cc: Coly Li Signed-off-by: Kent Overstreet --- drivers/md/bcache/btree.c | 14 +++---- drivers/md/bcache/journal.c | 20 +++++----- drivers/md/bcache/movinggc.c | 16 ++++---- drivers/md/bcache/request.c | 74 ++++++++++++++++++------------------- drivers/md/bcache/request.h | 2 +- drivers/md/bcache/super.c | 40 ++++++++++---------- drivers/md/bcache/writeback.c | 16 ++++---- fs/bcachefs/btree_io.c | 7 ++-- fs/bcachefs/btree_update_interior.c | 4 +- fs/bcachefs/fs-io-direct.c | 8 ++-- fs/bcachefs/io_write.c | 14 +++---- fs/bcachefs/io_write.h | 3 +- fs/bcachefs/journal_io.c | 17 ++++----- fs/bcachefs/journal_io.h | 2 +- include/linux/closure.h | 9 ++++- lib/closure.c | 5 ++- 16 files changed, 127 insertions(+), 124 deletions(-) (limited to 'include') diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index ae5cbb55861f..9441eac3d546 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w) w->journal = NULL; } -static void btree_node_write_unlock(struct closure *cl) +static CLOSURE_CALLBACK(btree_node_write_unlock) { - struct btree *b = container_of(cl, struct btree, io); + closure_type(b, struct btree, io); up(&b->io_mutex); } -static void __btree_node_write_done(struct closure *cl) +static CLOSURE_CALLBACK(__btree_node_write_done) { - struct btree *b = container_of(cl, struct btree, io); + closure_type(b, struct btree, io); struct btree_write *w = btree_prev_write(b); bch_bbio_free(b->bio, b->c); @@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl) closure_return_with_destructor(cl, btree_node_write_unlock); } -static void btree_node_write_done(struct closure *cl) +static CLOSURE_CALLBACK(btree_node_write_done) { - struct btree *b = container_of(cl, struct btree, io); + closure_type(b, struct btree, io); bio_free_pages(b->bio); - __btree_node_write_done(cl); + __btree_node_write_done(&cl->work); } static void btree_node_write_endio(struct bio *bio) diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index c182c21de2e8..7ff14bd2feb8 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio) closure_put(&w->c->journal.io); } -static void journal_write(struct closure *cl); +static CLOSURE_CALLBACK(journal_write); -static void journal_write_done(struct closure *cl) +static CLOSURE_CALLBACK(journal_write_done) { - struct journal *j = container_of(cl, struct journal, io); + closure_type(j, struct journal, io); struct journal_write *w = (j->cur == j->w) ? 
&j->w[1] : &j->w[0]; @@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl) continue_at_nobarrier(cl, journal_write, bch_journal_wq); } -static void journal_write_unlock(struct closure *cl) +static CLOSURE_CALLBACK(journal_write_unlock) __releases(&c->journal.lock) { - struct cache_set *c = container_of(cl, struct cache_set, journal.io); + closure_type(c, struct cache_set, journal.io); c->journal.io_in_flight = 0; spin_unlock(&c->journal.lock); } -static void journal_write_unlocked(struct closure *cl) +static CLOSURE_CALLBACK(journal_write_unlocked) __releases(c->journal.lock) { - struct cache_set *c = container_of(cl, struct cache_set, journal.io); + closure_type(c, struct cache_set, journal.io); struct cache *ca = c->cache; struct journal_write *w = c->journal.cur; struct bkey *k = &c->journal.key; @@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl) continue_at(cl, journal_write_done, NULL); } -static void journal_write(struct closure *cl) +static CLOSURE_CALLBACK(journal_write) { - struct cache_set *c = container_of(cl, struct cache_set, journal.io); + closure_type(c, struct cache_set, journal.io); spin_lock(&c->journal.lock); - journal_write_unlocked(cl); + journal_write_unlocked(&cl->work); } static void journal_try_write(struct cache_set *c) diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 9f32901fdad1..ebd500bdf0b2 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) /* Moving GC - IO loop */ -static void moving_io_destructor(struct closure *cl) +static CLOSURE_CALLBACK(moving_io_destructor) { - struct moving_io *io = container_of(cl, struct moving_io, cl); + closure_type(io, struct moving_io, cl); kfree(io); } -static void write_moving_finish(struct closure *cl) +static CLOSURE_CALLBACK(write_moving_finish) { - struct moving_io *io = container_of(cl, struct moving_io, cl); + closure_type(io, struct moving_io, cl); struct bio *bio = &io->bio.bio; bio_free_pages(bio); @@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io) bch_bio_map(bio, NULL); } -static void write_moving(struct closure *cl) +static CLOSURE_CALLBACK(write_moving) { - struct moving_io *io = container_of(cl, struct moving_io, cl); + closure_type(io, struct moving_io, cl); struct data_insert_op *op = &io->op; if (!op->status) { @@ -113,9 +113,9 @@ static void write_moving(struct closure *cl) continue_at(cl, write_moving_finish, op->wq); } -static void read_moving_submit(struct closure *cl) +static CLOSURE_CALLBACK(read_moving_submit) { - struct moving_io *io = container_of(cl, struct moving_io, cl); + closure_type(io, struct moving_io, cl); struct bio *bio = &io->bio.bio; bch_submit_bbio(bio, io->op.c, &io->w->key, 0); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index a9b1f3896249..83d112bd2b1c 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -25,7 +25,7 @@ struct kmem_cache *bch_search_cache; -static void bch_data_insert_start(struct closure *cl); +static CLOSURE_CALLBACK(bch_data_insert_start); static unsigned int cache_mode(struct cached_dev *dc) { @@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k) /* Insert data into cache */ -static void bch_data_insert_keys(struct closure *cl) +static CLOSURE_CALLBACK(bch_data_insert_keys) { - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + closure_type(op, struct data_insert_op, cl); atomic_t 
*journal_ref = NULL; struct bkey *replace_key = op->replace ? &op->replace_key : NULL; int ret; @@ -136,9 +136,9 @@ out: continue_at(cl, bch_data_insert_keys, op->wq); } -static void bch_data_insert_error(struct closure *cl) +static CLOSURE_CALLBACK(bch_data_insert_error) { - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + closure_type(op, struct data_insert_op, cl); /* * Our data write just errored, which means we've got a bunch of keys to @@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl) op->insert_keys.top = dst; - bch_data_insert_keys(cl); + bch_data_insert_keys(&cl->work); } static void bch_data_insert_endio(struct bio *bio) @@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio) bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache"); } -static void bch_data_insert_start(struct closure *cl) +static CLOSURE_CALLBACK(bch_data_insert_start) { - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + closure_type(op, struct data_insert_op, cl); struct bio *bio = op->bio, *n; if (op->bypass) @@ -305,16 +305,16 @@ err: * If op->bypass is true, instead of inserting the data it invalidates the * region of the cache represented by op->bio and op->inode. */ -void bch_data_insert(struct closure *cl) +CLOSURE_CALLBACK(bch_data_insert) { - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); + closure_type(op, struct data_insert_op, cl); trace_bcache_write(op->c, op->inode, op->bio, op->writeback, op->bypass); bch_keylist_init(&op->insert_keys); bio_get(op->bio); - bch_data_insert_start(cl); + bch_data_insert_start(&cl->work); } /* @@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) return n == bio ? 
MAP_DONE : MAP_CONTINUE; } -static void cache_lookup(struct closure *cl) +static CLOSURE_CALLBACK(cache_lookup) { - struct search *s = container_of(cl, struct search, iop.cl); + closure_type(s, struct search, iop.cl); struct bio *bio = &s->bio.bio; struct cached_dev *dc; int ret; @@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s, bio_cnt_set(bio, 3); } -static void search_free(struct closure *cl) +static CLOSURE_CALLBACK(search_free) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); atomic_dec(&s->iop.c->search_inflight); @@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio, /* Cached devices */ -static void cached_dev_bio_complete(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_bio_complete) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); cached_dev_put(dc); - search_free(cl); + search_free(&cl->work); } /* Process reads */ -static void cached_dev_read_error_done(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_read_error_done) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); if (s->iop.replace_collision) bch_mark_cache_miss_collision(s->iop.c, s->d); @@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl) if (s->iop.bio) bio_free_pages(s->iop.bio); - cached_dev_bio_complete(cl); + cached_dev_bio_complete(&cl->work); } -static void cached_dev_read_error(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_read_error) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct bio *bio = &s->bio.bio; /* @@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl) continue_at(cl, cached_dev_read_error_done, NULL); } -static void cached_dev_cache_miss_done(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_cache_miss_done) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct bcache_device *d = s->d; if (s->iop.replace_collision) @@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl) if (s->iop.bio) bio_free_pages(s->iop.bio); - cached_dev_bio_complete(cl); + cached_dev_bio_complete(&cl->work); closure_put(&d->cl); } -static void cached_dev_read_done(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_read_done) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); /* @@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl) continue_at(cl, cached_dev_cache_miss_done, NULL); } -static void cached_dev_read_done_bh(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_read_done_bh) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); bch_mark_cache_accounting(s->iop.c, s->d, @@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s) /* Process writes */ -static void cached_dev_write_complete(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_write_complete) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); up_read_non_owner(&dc->writeback_lock); - 
cached_dev_bio_complete(cl); + cached_dev_bio_complete(&cl->work); } static void cached_dev_write(struct cached_dev *dc, struct search *s) @@ -1048,9 +1048,9 @@ insert_data: continue_at(cl, cached_dev_write_complete, NULL); } -static void cached_dev_nodata(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_nodata) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); struct bio *bio = &s->bio.bio; if (s->iop.flush_journal) @@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s, return MAP_CONTINUE; } -static void flash_dev_nodata(struct closure *cl) +static CLOSURE_CALLBACK(flash_dev_nodata) { - struct search *s = container_of(cl, struct search, cl); + closure_type(s, struct search, cl); if (s->iop.flush_journal) bch_journal_meta(s->iop.c, cl); diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 38ab4856eaab..46bbef00aebb 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -34,7 +34,7 @@ struct data_insert_op { }; unsigned int bch_get_congested(const struct cache_set *c); -void bch_data_insert(struct closure *cl); +CLOSURE_CALLBACK(bch_data_insert); void bch_cached_dev_request_init(struct cached_dev *dc); void cached_dev_submit_bio(struct bio *bio); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 8bd899766372..e0db905c1ca0 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, submit_bio(bio); } -static void bch_write_bdev_super_unlock(struct closure *cl) +static CLOSURE_CALLBACK(bch_write_bdev_super_unlock) { - struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); + closure_type(dc, struct cached_dev, sb_write); up(&dc->sb_write_mutex); } @@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio) closure_put(&ca->set->sb_write); } -static void bcache_write_super_unlock(struct closure *cl) +static CLOSURE_CALLBACK(bcache_write_super_unlock) { - struct cache_set *c = container_of(cl, struct cache_set, sb_write); + closure_type(c, struct cache_set, sb_write); up(&c->sb_write_mutex); } @@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio) closure_put(cl); } -static void uuid_io_unlock(struct closure *cl) +static CLOSURE_CALLBACK(uuid_io_unlock) { - struct cache_set *c = container_of(cl, struct cache_set, uuid_write); + closure_type(c, struct cache_set, uuid_write); up(&c->uuid_write_mutex); } @@ -1342,9 +1342,9 @@ void bch_cached_dev_release(struct kobject *kobj) module_put(THIS_MODULE); } -static void cached_dev_free(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_free) { - struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); + closure_type(dc, struct cached_dev, disk.cl); if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) cancel_writeback_rate_update_dwork(dc); @@ -1376,9 +1376,9 @@ static void cached_dev_free(struct closure *cl) kobject_put(&dc->disk.kobj); } -static void cached_dev_flush(struct closure *cl) +static CLOSURE_CALLBACK(cached_dev_flush) { - struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); + closure_type(dc, struct cached_dev, disk.cl); struct bcache_device *d = &dc->disk; mutex_lock(&bch_register_lock); @@ -1497,9 +1497,9 @@ void bch_flash_dev_release(struct kobject *kobj) kfree(d); } -static void flash_dev_free(struct closure *cl) +static CLOSURE_CALLBACK(flash_dev_free) { - struct bcache_device *d = container_of(cl, struct 
bcache_device, cl); + closure_type(d, struct bcache_device, cl); mutex_lock(&bch_register_lock); atomic_long_sub(bcache_dev_sectors_dirty(d), @@ -1510,9 +1510,9 @@ static void flash_dev_free(struct closure *cl) kobject_put(&d->kobj); } -static void flash_dev_flush(struct closure *cl) +static CLOSURE_CALLBACK(flash_dev_flush) { - struct bcache_device *d = container_of(cl, struct bcache_device, cl); + closure_type(d, struct bcache_device, cl); mutex_lock(&bch_register_lock); bcache_device_unlink(d); @@ -1668,9 +1668,9 @@ void bch_cache_set_release(struct kobject *kobj) module_put(THIS_MODULE); } -static void cache_set_free(struct closure *cl) +static CLOSURE_CALLBACK(cache_set_free) { - struct cache_set *c = container_of(cl, struct cache_set, cl); + closure_type(c, struct cache_set, cl); struct cache *ca; debugfs_remove(c->debug); @@ -1709,9 +1709,9 @@ static void cache_set_free(struct closure *cl) kobject_put(&c->kobj); } -static void cache_set_flush(struct closure *cl) +static CLOSURE_CALLBACK(cache_set_flush) { - struct cache_set *c = container_of(cl, struct cache_set, caching); + closure_type(c, struct cache_set, caching); struct cache *ca = c->cache; struct btree *b; @@ -1806,9 +1806,9 @@ static void conditional_stop_bcache_device(struct cache_set *c, } } -static void __cache_set_unregister(struct closure *cl) +static CLOSURE_CALLBACK(__cache_set_unregister) { - struct cache_set *c = container_of(cl, struct cache_set, caching); + closure_type(c, struct cache_set, caching); struct cached_dev *dc; struct bcache_device *d; size_t i; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 24c049067f61..77427e355613 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w) bch_bio_map(bio, NULL); } -static void dirty_io_destructor(struct closure *cl) +static CLOSURE_CALLBACK(dirty_io_destructor) { - struct dirty_io *io = container_of(cl, struct dirty_io, cl); + closure_type(io, struct dirty_io, cl); kfree(io); } -static void write_dirty_finish(struct closure *cl) +static CLOSURE_CALLBACK(write_dirty_finish) { - struct dirty_io *io = container_of(cl, struct dirty_io, cl); + closure_type(io, struct dirty_io, cl); struct keybuf_key *w = io->bio.bi_private; struct cached_dev *dc = io->dc; @@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio) closure_put(&io->cl); } -static void write_dirty(struct closure *cl) +static CLOSURE_CALLBACK(write_dirty) { - struct dirty_io *io = container_of(cl, struct dirty_io, cl); + closure_type(io, struct dirty_io, cl); struct keybuf_key *w = io->bio.bi_private; struct cached_dev *dc = io->dc; @@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio) dirty_endio(bio); } -static void read_dirty_submit(struct closure *cl) +static CLOSURE_CALLBACK(read_dirty_submit) { - struct dirty_io *io = container_of(cl, struct dirty_io, cl); + closure_type(io, struct dirty_io, cl); closure_bio_submit(io->dc->disk.c, &io->bio, cl); diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 37d896edb06e..57c20390e10e 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1358,10 +1358,9 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void * return offset; } -static void btree_node_read_all_replicas_done(struct closure *cl) +static CLOSURE_CALLBACK(btree_node_read_all_replicas_done) { - struct btree_node_read_all *ra = - container_of(cl, struct btree_node_read_all, cl); + closure_type(ra, struct 
btree_node_read_all, cl); struct bch_fs *c = ra->c; struct btree *b = ra->b; struct printbuf buf = PRINTBUF; @@ -1567,7 +1566,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool if (sync) { closure_sync(&ra->cl); - btree_node_read_all_replicas_done(&ra->cl); + btree_node_read_all_replicas_done(&ra->cl.work); } else { continue_at(&ra->cl, btree_node_read_all_replicas_done, c->io_complete_wq); diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 76f27bc9fa24..d08efd6d958e 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -778,9 +778,9 @@ static void btree_interior_update_work(struct work_struct *work) } } -static void btree_update_set_nodes_written(struct closure *cl) +static CLOSURE_CALLBACK(btree_update_set_nodes_written) { - struct btree_update *as = container_of(cl, struct btree_update, cl); + closure_type(as, struct btree_update, cl); struct bch_fs *c = as->c; mutex_lock(&c->btree_interior_update_lock); diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c index 5b42a76c4796..9a479e4de6b3 100644 --- a/fs/bcachefs/fs-io-direct.c +++ b/fs/bcachefs/fs-io-direct.c @@ -35,9 +35,9 @@ static void bio_check_or_release(struct bio *bio, bool check_dirty) } } -static void bch2_dio_read_complete(struct closure *cl) +static CLOSURE_CALLBACK(bch2_dio_read_complete) { - struct dio_read *dio = container_of(cl, struct dio_read, cl); + closure_type(dio, struct dio_read, cl); dio->req->ki_complete(dio->req, dio->ret); bio_check_or_release(&dio->rbio.bio, dio->should_dirty); @@ -325,9 +325,9 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio) return 0; } -static void bch2_dio_write_flush_done(struct closure *cl) +static CLOSURE_CALLBACK(bch2_dio_write_flush_done) { - struct dio_write *dio = container_of(cl, struct dio_write, op.cl); + closure_type(dio, struct dio_write, op.cl); struct bch_fs *c = dio->op.c; closure_debug_destroy(cl); diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index d704a8f829c8..8ede46b1e354 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -580,9 +580,9 @@ static inline void wp_update_state(struct write_point *wp, bool running) __wp_update_state(wp, state); } -static void bch2_write_index(struct closure *cl) +static CLOSURE_CALLBACK(bch2_write_index) { - struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); + closure_type(op, struct bch_write_op, cl); struct write_point *wp = op->wp; struct workqueue_struct *wq = index_update_wq(op); unsigned long flags; @@ -1208,9 +1208,9 @@ static void __bch2_nocow_write_done(struct bch_write_op *op) bch2_nocow_write_convert_unwritten(op); } -static void bch2_nocow_write_done(struct closure *cl) +static CLOSURE_CALLBACK(bch2_nocow_write_done) { - struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); + closure_type(op, struct bch_write_op, cl); __bch2_nocow_write_done(op); bch2_write_done(cl); @@ -1363,7 +1363,7 @@ err: op->insert_keys.top = op->insert_keys.keys; } else if (op->flags & BCH_WRITE_SYNC) { closure_sync(&op->cl); - bch2_nocow_write_done(&op->cl); + bch2_nocow_write_done(&op->cl.work); } else { /* * XXX @@ -1566,9 +1566,9 @@ err: * If op->discard is true, instead of inserting the data it invalidates the * region of the cache represented by op->bio and op->inode. 
*/ -void bch2_write(struct closure *cl) +CLOSURE_CALLBACK(bch2_write) { - struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); + closure_type(op, struct bch_write_op, cl); struct bio *bio = &op->wbio.bio; struct bch_fs *c = op->c; unsigned data_len; diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h index 9323167229ee..6c276a48f95d 100644 --- a/fs/bcachefs/io_write.h +++ b/fs/bcachefs/io_write.h @@ -90,8 +90,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c, op->devs_need_flush = NULL; } -void bch2_write(struct closure *); - +CLOSURE_CALLBACK(bch2_write); void bch2_write_point_do_index_updates(struct work_struct *); static inline struct bch_write_bio *wbio_init(struct bio *bio) diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 786a09285509..02e6484f9953 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -1025,10 +1025,9 @@ next_block: return 0; } -static void bch2_journal_read_device(struct closure *cl) +static CLOSURE_CALLBACK(bch2_journal_read_device) { - struct journal_device *ja = - container_of(cl, struct journal_device, read); + closure_type(ja, struct journal_device, read); struct bch_dev *ca = container_of(ja, struct bch_dev, journal); struct bch_fs *c = ca->fs; struct journal_list *jlist = @@ -1520,9 +1519,9 @@ static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j) return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK); } -static void journal_write_done(struct closure *cl) +static CLOSURE_CALLBACK(journal_write_done) { - struct journal *j = container_of(cl, struct journal, io); + closure_type(j, struct journal, io); struct bch_fs *c = container_of(j, struct bch_fs, journal); struct journal_buf *w = journal_last_unwritten_buf(j); struct bch_replicas_padded replicas; @@ -1638,9 +1637,9 @@ static void journal_write_endio(struct bio *bio) percpu_ref_put(&ca->io_ref); } -static void do_journal_write(struct closure *cl) +static CLOSURE_CALLBACK(do_journal_write) { - struct journal *j = container_of(cl, struct journal, io); + closure_type(j, struct journal, io); struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_dev *ca; struct journal_buf *w = journal_last_unwritten_buf(j); @@ -1850,9 +1849,9 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf * return 0; } -void bch2_journal_write(struct closure *cl) +CLOSURE_CALLBACK(bch2_journal_write) { - struct journal *j = container_of(cl, struct journal, io); + closure_type(j, struct journal, io); struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_dev *ca; struct journal_buf *w = journal_last_unwritten_buf(j); diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h index a88d097b13f1..c035e7c108e1 100644 --- a/fs/bcachefs/journal_io.h +++ b/fs/bcachefs/journal_io.h @@ -60,6 +60,6 @@ void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *, int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *); -void bch2_journal_write(struct closure *); +CLOSURE_CALLBACK(bch2_journal_write); #endif /* _BCACHEFS_JOURNAL_IO_H */ diff --git a/include/linux/closure.h b/include/linux/closure.h index de7bb47d8a46..c554c6a08768 100644 --- a/include/linux/closure.h +++ b/include/linux/closure.h @@ -104,7 +104,7 @@ struct closure; struct closure_syncer; -typedef void (closure_fn) (struct closure *); +typedef void (closure_fn) (struct work_struct *); extern struct dentry *bcache_debug; struct closure_waitlist { @@ -254,7 +254,7 @@ static 
inline void closure_queue(struct closure *cl) INIT_WORK(&cl->work, cl->work.func); BUG_ON(!queue_work(wq, &cl->work)); } else - cl->fn(cl); + cl->fn(&cl->work); } /** @@ -309,6 +309,11 @@ static inline void closure_wake_up(struct closure_waitlist *list) __closure_wake_up(list); } +#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws) +#define closure_type(name, type, member) \ + struct closure *cl = container_of(ws, struct closure, work); \ + type *name = container_of(cl, type, member) + /** * continue_at - jump to another function with barrier * diff --git a/lib/closure.c b/lib/closure.c index f86c9eeafb35..c16540552d61 100644 --- a/lib/closure.c +++ b/lib/closure.c @@ -36,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) closure_debug_destroy(cl); if (destructor) - destructor(cl); + destructor(&cl->work); if (parent) closure_put(parent); @@ -108,8 +108,9 @@ struct closure_syncer { int done; }; -static void closure_sync_fn(struct closure *cl) +static CLOSURE_CALLBACK(closure_sync_fn) { + struct closure *cl = container_of(ws, struct closure, work); struct closure_syncer *s = cl->s; struct task_struct *p; -- cgit v1.2.3 From ef5828805842204dd0259ecfc132b5916c8a77ae Mon Sep 17 00:00:00 2001 From: Michael-CY Lee Date: Wed, 22 Nov 2023 11:02:37 +0800 Subject: wifi: avoid offset calculation on NULL pointer ieee80211_he_6ghz_oper() can be passed a NULL pointer and checks for that, but it computed the pointer to the structure's optional field before that check. Move the calculation after the check. Signed-off-by: Michael-CY Lee Link: https://lore.kernel.org/r/20231122030237.31276-1-michael-cy.lee@mediatek.com [rewrite commit message] Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 958771bac9c0..c2ac9e9e7ee9 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2830,12 +2830,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) static inline const struct ieee80211_he_6ghz_oper * ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) { - const u8 *ret = (const void *)&he_oper->optional; + const u8 *ret; u32 he_oper_params; if (!he_oper) return NULL; + ret = (const void *)&he_oper->optional; + he_oper_params = le32_to_cpu(he_oper->he_oper_params); if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)) -- cgit v1.2.3 From 6371be7aeb986905bb60ec73d002fc02343393b4 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Tue, 21 Nov 2023 07:56:30 +0900 Subject: scsi: Change SCSI device boolean fields to single bit flags Commit 3cc2ffe5c16d ("scsi: sd: Differentiate system and runtime start/stop management") changed the single bit manage_start_stop flag into 2 boolean fields of the SCSI device structure. Commit 24eca2dce0f8 ("scsi: sd: Introduce manage_shutdown device flag") introduced the manage_shutdown boolean field for the same structure. Together, these 2 commits increase the size of struct scsi_device by 8 bytes by using booleans instead of defining the manage_xxx fields as single bit flags, similarly to other flags of this structure. Avoid this unnecessary structure size increase and be consistent with the definition of other flags by reverting the definitions of the manage_xxx fields to single bit flags. 
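As a rough userspace illustration of the size effect (a standalone sketch, not kernel code; the :24 and :32 fields stand in for the long run of existing single-bit flags in struct scsi_device, and exact sizes are ABI- and compiler-dependent):

    #include <stdio.h>

    struct with_bools {
            _Bool manage_system_start_stop;         /* one byte each */
            _Bool manage_runtime_start_stop;
            _Bool manage_shutdown;
            unsigned other_flags:24;
            unsigned more_flags:32;
    };

    struct with_bitfields {
            unsigned manage_system_start_stop:1;    /* packed with the rest */
            unsigned manage_runtime_start_stop:1;
            unsigned manage_shutdown:1;
            unsigned other_flags:24;
            unsigned more_flags:32;
    };

    int main(void)
    {
            /* Typically prints "bools: 12, bitfields: 8" on x86-64:
             * the one-byte bools push the following bit-fields into
             * later storage units, while :1 flags share a word. */
            printf("bools: %zu, bitfields: %zu\n",
                   sizeof(struct with_bools),
                   sizeof(struct with_bitfields));
            return 0;
    }
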
Fixes: 3cc2ffe5c16d ("scsi: sd: Differentiate system and runtime start/stop management") Fixes: 24eca2dce0f8 ("scsi: sd: Introduce manage_shutdown device flag") Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal Link: https://lore.kernel.org/r/20231120225631.37938-2-dlemoal@kernel.org Reviewed-by: Niklas Cassel Signed-off-by: Martin K. Petersen --- drivers/ata/libata-scsi.c | 4 ++-- drivers/firewire/sbp2.c | 6 +++--- include/scsi/scsi_device.h | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index c10ff8985203..63317449f6ea 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1056,8 +1056,8 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) * and resume and shutdown only. For system level suspend/resume, * devices power state is handled directly by libata EH. */ - sdev->manage_runtime_start_stop = true; - sdev->manage_shutdown = true; + sdev->manage_runtime_start_stop = 1; + sdev->manage_shutdown = 1; } /* diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 7edf2c95282f..e779d866022b 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) sdev->use_10_for_rw = 1; if (sbp2_param_exclusive_login) { - sdev->manage_system_start_stop = true; - sdev->manage_runtime_start_stop = true; - sdev->manage_shutdown = true; + sdev->manage_system_start_stop = 1; + sdev->manage_runtime_start_stop = 1; + sdev->manage_shutdown = 1; } if (sdev->type == TYPE_ROM) diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 10480eb582b2..1fb460dfca0c 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -167,19 +167,19 @@ struct scsi_device { * power state for system suspend/resume (suspend to RAM and * hibernation) operations. */ - bool manage_system_start_stop; + unsigned manage_system_start_stop:1; /* * If true, let the high-level device driver (sd) manage the device * power state for runtime device suspand and resume operations. */ - bool manage_runtime_start_stop; + unsigned manage_runtime_start_stop:1; /* * If true, let the high-level device driver (sd) manage the device * power state for system shutdown (power off) operations. */ - bool manage_shutdown; + unsigned manage_shutdown:1; unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ -- cgit v1.2.3 From b09d7f8fd50f6e93cbadd8d27fde178f745b42a1 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Tue, 21 Nov 2023 07:56:31 +0900 Subject: scsi: sd: Fix system start for ATA devices It is not always possible to keep a device in the runtime suspended state when a system level suspend/resume cycle is executed. E.g. for ATA devices connected to AHCI adapters, system resume resets the ATA ports, which causes connected devices to spin up. In such a case, a runtime suspended disk will incorrectly be seen with a suspended runtime state because the device is not resumed by sd_resume_system(). The power state seen by the user is different from the actual device physical power state. Fix this issue by introducing the struct scsi_device flag force_runtime_start_on_system_start. When set, this flag causes sd_resume_system() to request a runtime resume operation for runtime suspended devices. This results in the user seeing the device runtime_state as active after a system resume, thus correctly reflecting the device physical power state. 
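Note that pm_request_resume() queues an asynchronous runtime resume rather than resuming synchronously, so system resume is not stalled waiting for disks to spin up. A hedged sketch of the general pattern for drivers whose hardware is powered up behind the PM core's back (hypothetical names, not the sd code itself):

    static int my_resume_system(struct device *dev)
    {
            if (pm_runtime_suspended(dev)) {
                    /*
                     * The PM core recorded the device as runtime
                     * suspended, but the hardware was physically
                     * powered up (e.g. by a port reset). Queue an
                     * async runtime resume to resync the recorded
                     * state with the physical state.
                     */
                    pm_request_resume(dev);
                    return 0;
            }

            /* Device was runtime active; do a full system resume. */
            return my_full_resume(dev);
    }
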
Fixes: 9131bff6a9f1 ("scsi: core: pm: Only runtime resume if necessary") Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal Link: https://lore.kernel.org/r/20231120225631.37938-3-dlemoal@kernel.org Signed-off-by: Martin K. Petersen --- drivers/ata/libata-scsi.c | 5 +++++ drivers/scsi/sd.c | 9 ++++++++- include/scsi/scsi_device.h | 6 ++++++ 3 files changed, 19 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 63317449f6ea..0a0f483124c3 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) * Ask the sd driver to issue START STOP UNIT on runtime suspend * and resume and shutdown only. For system level suspend/resume, * devices power state is handled directly by libata EH. + * Given that disks are always spun up on system resume, also + * make sure that the sd driver forces runtime suspended disks + * to be resumed to correctly reflect the power state of the + * device. */ sdev->manage_runtime_start_stop = 1; sdev->manage_shutdown = 1; + sdev->force_runtime_start_on_system_start = 1; } /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index fa00dd503cbf..542a4bbb21bc 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3949,8 +3949,15 @@ static int sd_resume(struct device *dev, bool runtime) static int sd_resume_system(struct device *dev) { - if (pm_runtime_suspended(dev)) + if (pm_runtime_suspended(dev)) { + struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_device *sdp = sdkp ? sdkp->device : NULL; + + if (sdp && sdp->force_runtime_start_on_system_start) + pm_request_resume(dev); + return 0; + } return sd_resume(dev, false); } diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 1fb460dfca0c..5ec1e71a09de 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -181,6 +181,12 @@ struct scsi_device { */ unsigned manage_shutdown:1; + /* + * If set and if the device is runtime suspended, ask the high-level + * device driver (sd) to force a runtime resume of the device. + */ + unsigned force_runtime_start_on_system_start:1; + unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ unsigned busy:1; /* Used to prevent races */ -- cgit v1.2.3 From a2e7e59a94269484a83386972ca07c22fd188854 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 15 Nov 2023 18:25:44 +0000 Subject: iommu: Avoid more races around device probe It turns out there are more subtle races beyond just the main part of __iommu_probe_device() itself running in parallel - the dev_iommu_free() on the way out of an unsuccessful probe can still manage to trip up concurrent accesses to a device's fwspec. Thus, extend the scope of iommu_probe_device_lock() to also serialise fwspec creation and initial retrieval. 
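A hedged sketch of the serialisation this enforces (not a literal excerpt; the real call sites are in the diff below, and the fwnode/ops variables are assumed context):

    /* Take the now-global probe lock around the whole
     * check-then-create sequence, so a concurrent dev_iommu_free()
     * cannot free the fwspec between the lookup and its use. */
    mutex_lock(&iommu_probe_device_lock);
    fwspec = dev_iommu_fwspec_get(dev);
    if (!fwspec)
            err = iommu_fwspec_init(dev, fwnode, ops);
    /* ... probe/configure using the now-stable fwspec ... */
    mutex_unlock(&iommu_probe_device_lock);
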
Reported-by: Zhenhua Huang Link: https://lore.kernel.org/linux-iommu/e2e20e1c-6450-4ac5-9804-b0000acdf7de@quicinc.com/ Fixes: 01657bc14a39 ("iommu: Avoid races around device probe") Signed-off-by: Robin Murphy Acked-by: Greg Kroah-Hartman Reviewed-by: André Draszik Tested-by: André Draszik Link: https://lore.kernel.org/r/16f433658661d7cadfea51e7c65da95826112a2b.1700071477.git.robin.murphy@arm.com Cc: stable@vger.kernel.org Signed-off-by: Joerg Roedel --- drivers/acpi/scan.c | 7 ++++++- drivers/iommu/iommu.c | 20 ++++++++++---------- drivers/iommu/of_iommu.c | 12 +++++++++--- include/linux/iommu.h | 1 + 4 files changed, 26 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fa5dd71a80fa..02bb2cce423f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev, int err; const struct iommu_ops *ops; + /* Serialise to make dev->iommu stable under our potential fwspec */ + mutex_lock(&iommu_probe_device_lock); /* * If we already translated the fwspec there is nothing left to do, * return the iommu_ops. */ ops = acpi_iommu_fwspec_ops(dev); - if (ops) + if (ops) { + mutex_unlock(&iommu_probe_device_lock); return ops; + } err = iort_iommu_configure_id(dev, id_in); if (err && err != -EPROBE_DEFER) err = viot_iommu_configure(dev); + mutex_unlock(&iommu_probe_device_lock); /* * If we have reason to believe the IOMMU driver missed the initial diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index c9a05bb49bfa..33e2a9b5d339 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -485,11 +485,12 @@ static void iommu_deinit_device(struct device *dev) dev_iommu_free(dev); } +DEFINE_MUTEX(iommu_probe_device_lock); + static int __iommu_probe_device(struct device *dev, struct list_head *group_list) { const struct iommu_ops *ops = dev->bus->iommu_ops; struct iommu_group *group; - static DEFINE_MUTEX(iommu_probe_device_lock); struct group_device *gdev; int ret; @@ -502,17 +503,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list * probably be able to use device_lock() here to minimise the scope, * but for now enforcing a simple global ordering is fine. 
*/ - mutex_lock(&iommu_probe_device_lock); + lockdep_assert_held(&iommu_probe_device_lock); /* Device is probed already if in a group */ - if (dev->iommu_group) { - ret = 0; - goto out_unlock; - } + if (dev->iommu_group) + return 0; ret = iommu_init_device(dev, ops); if (ret) - goto out_unlock; + return ret; group = dev->iommu_group; gdev = iommu_group_alloc_device(group, dev); @@ -548,7 +547,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list list_add_tail(&group->entry, group_list); } mutex_unlock(&group->mutex); - mutex_unlock(&iommu_probe_device_lock); if (dev_is_pci(dev)) iommu_dma_set_pci_32bit_workaround(dev); @@ -562,8 +560,6 @@ err_put_group: iommu_deinit_device(dev); mutex_unlock(&group->mutex); iommu_group_put(group); -out_unlock: - mutex_unlock(&iommu_probe_device_lock); return ret; } @@ -573,7 +569,9 @@ int iommu_probe_device(struct device *dev) const struct iommu_ops *ops; int ret; + mutex_lock(&iommu_probe_device_lock); ret = __iommu_probe_device(dev, NULL); + mutex_unlock(&iommu_probe_device_lock); if (ret) return ret; @@ -1822,7 +1820,9 @@ static int probe_iommu_group(struct device *dev, void *data) struct list_head *group_list = data; int ret; + mutex_lock(&iommu_probe_device_lock); ret = __iommu_probe_device(dev, group_list); + mutex_unlock(&iommu_probe_device_lock); if (ret == -ENODEV) ret = 0; diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 157b286e36bf..c25b4ae6aeee 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, const u32 *id) { const struct iommu_ops *ops = NULL; - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct iommu_fwspec *fwspec; int err = NO_IOMMU; if (!master_np) return NULL; + /* Serialise to make dev->iommu stable under our potential fwspec */ + mutex_lock(&iommu_probe_device_lock); + fwspec = dev_iommu_fwspec_get(dev); if (fwspec) { - if (fwspec->ops) + if (fwspec->ops) { + mutex_unlock(&iommu_probe_device_lock); return fwspec->ops; - + } /* In the deferred case, start again from scratch */ iommu_fwspec_free(dev); } @@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, fwspec = dev_iommu_fwspec_get(dev); ops = fwspec->ops; } + mutex_unlock(&iommu_probe_device_lock); + /* * If we have reason to believe the IOMMU driver missed the initial * probe for dev, replay it to get things in order. diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ec289c1016f5..6291aa7b079b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -845,6 +845,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv) dev->iommu->priv = priv; } +extern struct mutex iommu_probe_device_lock; int iommu_probe_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); -- cgit v1.2.3 From 8c88a474357ead632b07c70bf7f119ace8c3b39e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 24 Nov 2023 17:25:26 +0100 Subject: debugfs: add API to allow debugfs operations cancellation In some cases there might be longer-running hardware accesses in debugfs files, or attempts to acquire locks, and we want to still be able to quickly remove the files. Introduce a cancellations API to use inside the debugfs handler functions to be able to cancel such operations on a per-file basis. 
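As a usage sketch (the my_* handler, device and cancel callback are hypothetical; only struct debugfs_cancellation and the enter/leave calls come from this patch):

    static void my_cancel(struct dentry *dentry, void *data)
    {
            /* Invoked from debugfs_remove(): abort the pending access. */
            my_dev_abort_io(data);
    }

    static ssize_t my_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
    {
            struct my_dev *mydev = file->private_data;
            struct debugfs_cancellation c = {
                    .cancel = my_cancel,
                    .cancel_data = mydev,
            };
            ssize_t ret;

            debugfs_enter_cancellation(file, &c);
            ret = my_dev_slow_hw_read(mydev, buf, count, ppos);
            debugfs_leave_cancellation(file, &c);

            return ret;
    }
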
Acked-by: Greg Kroah-Hartman Signed-off-by: Johannes Berg --- fs/debugfs/file.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/debugfs/inode.c | 32 ++++++++++++++++++- fs/debugfs/internal.h | 5 +++ include/linux/debugfs.h | 19 ++++++++++++ 4 files changed, 137 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 3eff92450fd5..5568cdea3490 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -114,6 +114,8 @@ int debugfs_file_get(struct dentry *dentry) lockdep_init_map(&fsd->lockdep_map, fsd->lock_name ?: "debugfs", &fsd->key, 0); #endif + INIT_LIST_HEAD(&fsd->cancellations); + mutex_init(&fsd->cancellations_mtx); } /* @@ -156,6 +158,86 @@ void debugfs_file_put(struct dentry *dentry) } EXPORT_SYMBOL_GPL(debugfs_file_put); +/** + * debugfs_enter_cancellation - enter a debugfs cancellation + * @file: the file being accessed + * @cancellation: the cancellation object, the cancel callback + * inside of it must be initialized + * + * When a debugfs file is removed it needs to wait for all active + * operations to complete. However, the operation itself may need + * to wait for hardware or completion of some asynchronous process + * or similar. As such, it may need to be cancelled to avoid long + * waits or even deadlocks. + * + * This function can be used inside a debugfs handler that may + * need to be cancelled. As soon as this function is called, the + * cancellation's 'cancel' callback may be called, at which point + * the caller should proceed to call debugfs_leave_cancellation() + * and leave the debugfs handler function as soon as possible. + * Note that the 'cancel' callback is only ever called in the + * context of some kind of debugfs_remove(). + * + * This function must be paired with debugfs_leave_cancellation(). + */ +void debugfs_enter_cancellation(struct file *file, + struct debugfs_cancellation *cancellation) +{ + struct debugfs_fsdata *fsd; + struct dentry *dentry = F_DENTRY(file); + + INIT_LIST_HEAD(&cancellation->list); + + if (WARN_ON(!d_is_reg(dentry))) + return; + + if (WARN_ON(!cancellation->cancel)) + return; + + fsd = READ_ONCE(dentry->d_fsdata); + if (WARN_ON(!fsd || + ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) + return; + + mutex_lock(&fsd->cancellations_mtx); + list_add(&cancellation->list, &fsd->cancellations); + mutex_unlock(&fsd->cancellations_mtx); + + /* if we're already removing wake it up to cancel */ + if (d_unlinked(dentry)) + complete(&fsd->active_users_drained); +} +EXPORT_SYMBOL_GPL(debugfs_enter_cancellation); + +/** + * debugfs_leave_cancellation - leave cancellation section + * @file: the file being accessed + * @cancellation: the cancellation previously registered with + * debugfs_enter_cancellation() + * + * See the documentation of debugfs_enter_cancellation(). + */ +void debugfs_leave_cancellation(struct file *file, + struct debugfs_cancellation *cancellation) +{ + struct debugfs_fsdata *fsd; + struct dentry *dentry = F_DENTRY(file); + + if (WARN_ON(!d_is_reg(dentry))) + return; + + fsd = READ_ONCE(dentry->d_fsdata); + if (WARN_ON(!fsd || + ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) + return; + + mutex_lock(&fsd->cancellations_mtx); + if (!list_empty(&cancellation->list)) + list_del(&cancellation->list); + mutex_unlock(&fsd->cancellations_mtx); +} +EXPORT_SYMBOL_GPL(debugfs_leave_cancellation); + /* * Only permit access to world-readable files when the kernel is locked down. 
* We also need to exclude any file that has ways to write or alter it as root diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 80f4f000dcc1..d53c2860b03c 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -247,6 +247,8 @@ static void debugfs_release_dentry(struct dentry *dentry) lockdep_unregister_key(&fsd->key); kfree(fsd->lock_name); #endif + WARN_ON(!list_empty(&fsd->cancellations)); + mutex_destroy(&fsd->cancellations_mtx); } kfree(fsd); @@ -756,8 +758,36 @@ static void __debugfs_file_removed(struct dentry *dentry) lock_map_acquire(&fsd->lockdep_map); lock_map_release(&fsd->lockdep_map); - if (!refcount_dec_and_test(&fsd->active_users)) + /* if we hit zero, just wait for all to finish */ + if (!refcount_dec_and_test(&fsd->active_users)) { wait_for_completion(&fsd->active_users_drained); + return; + } + + /* if we didn't hit zero, try to cancel any we can */ + while (refcount_read(&fsd->active_users)) { + struct debugfs_cancellation *c; + + /* + * Lock the cancellations. Note that the cancellations + * structs are meant to be on the stack, so we need to + * ensure we either use them here or don't touch them, + * and debugfs_leave_cancellation() will wait for this + * to be finished processing before exiting one. It may + * of course win and remove the cancellation, but then + * chances are we never even got into this bit, we only + * do if the refcount isn't zero already. + */ + mutex_lock(&fsd->cancellations_mtx); + while ((c = list_first_entry_or_null(&fsd->cancellations, + typeof(*c), list))) { + list_del_init(&c->list); + c->cancel(dentry, c->cancel_data); + } + mutex_unlock(&fsd->cancellations_mtx); + + wait_for_completion(&fsd->active_users_drained); + } } static void remove_one(struct dentry *victim) diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h index c7d61cfc97d2..0c4c68cf161f 100644 --- a/fs/debugfs/internal.h +++ b/fs/debugfs/internal.h @@ -8,6 +8,7 @@ #ifndef _DEBUGFS_INTERNAL_H_ #define _DEBUGFS_INTERNAL_H_ #include +#include struct file_operations; @@ -29,6 +30,10 @@ struct debugfs_fsdata { struct lock_class_key key; char *lock_name; #endif + + /* protect cancellations */ + struct mutex cancellations_mtx; + struct list_head cancellations; }; }; }; diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index ea2d919fd9c7..c9c65b132c0f 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -171,6 +171,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); +/** + * struct debugfs_cancellation - cancellation data + * @list: internal, for keeping track + * @cancel: callback to call + * @cancel_data: extra data for the callback to call + */ +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +void __acquires(cancellation) +debugfs_enter_cancellation(struct file *file, + struct debugfs_cancellation *cancellation); +void __releases(cancellation) +debugfs_leave_cancellation(struct file *file, + struct debugfs_cancellation *cancellation); + #else #include -- cgit v1.2.3 From b590b9ae1efc30e52f81d95cdb2519a4c248b965 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 24 Nov 2023 17:25:27 +0100 Subject: wifi: cfg80211: add locked debugfs wrappers Add wrappers for debugfs files that should be called with the wiphy mutex held, while the file is also to be removed under the wiphy mutex. 
This could otherwise deadlock when a file is trying to acquire the wiphy mutex while the code removing it holds the mutex but waits for the removal. This actually works by pushing the execution of the read or write handler to a wiphy work that can be cancelled using the debugfs cancellation API. Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 46 ++++++++++++++ net/wireless/debugfs.c | 160 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 206 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index b137a33a1b68..4ecfb06c413d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -9299,4 +9299,50 @@ bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap, */ void cfg80211_links_removed(struct net_device *dev, u16 link_mask); +#ifdef CONFIG_CFG80211_DEBUGFS +/** + * wiphy_locked_debugfs_read - do a locked read in debugfs + * @wiphy: the wiphy to use + * @file: the file being read + * @buf: the buffer to fill and then read from + * @bufsize: size of the buffer + * @userbuf: the user buffer to copy to + * @count: read count + * @ppos: read position + * @handler: the read handler to call (under wiphy lock) + * @data: additional data to pass to the read handler + */ +ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + char __user *userbuf, size_t count, + loff_t *ppos, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t bufsize, + void *data), + void *data); + +/** + * wiphy_locked_debugfs_write - do a locked write in debugfs + * @wiphy: the wiphy to use + * @file: the file being written to + * @buf: the buffer to copy the user data to + * @bufsize: size of the buffer + * @userbuf: the user buffer to copy from + * @count: read count + * @handler: the write handler to call (under wiphy lock) + * @data: additional data to pass to the write handler + */ +ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + const char __user *userbuf, size_t count, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t count, + void *data), + void *data); +#endif + #endif /* __NET_CFG80211_H */ diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 0878b162890a..40e49074e2ee 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -4,6 +4,7 @@ * * Copyright 2009 Luis R. 
Rodriguez * Copyright 2007 Johannes Berg + * Copyright (C) 2023 Intel Corporation */ #include @@ -109,3 +110,162 @@ void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) DEBUGFS_ADD(long_retry_limit); DEBUGFS_ADD(ht40allow_map); } + +struct debugfs_read_work { + struct wiphy_work work; + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t count, + void *data); + struct wiphy *wiphy; + struct file *file; + char *buf; + size_t bufsize; + void *data; + ssize_t ret; + struct completion completion; +}; + +static void wiphy_locked_debugfs_read_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct debugfs_read_work *w = container_of(work, typeof(*w), work); + + w->ret = w->handler(w->wiphy, w->file, w->buf, w->bufsize, w->data); + complete(&w->completion); +} + +static void wiphy_locked_debugfs_read_cancel(struct dentry *dentry, + void *data) +{ + struct debugfs_read_work *w = data; + + wiphy_work_cancel(w->wiphy, &w->work); + complete(&w->completion); +} + +ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + char __user *userbuf, size_t count, + loff_t *ppos, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t bufsize, + void *data), + void *data) +{ + struct debugfs_read_work work = { + .handler = handler, + .wiphy = wiphy, + .file = file, + .buf = buf, + .bufsize = bufsize, + .data = data, + .ret = -ENODEV, + .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), + }; + struct debugfs_cancellation cancellation = { + .cancel = wiphy_locked_debugfs_read_cancel, + .cancel_data = &work, + }; + + /* don't leak stack data or whatever */ + memset(buf, 0, bufsize); + + wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work); + wiphy_work_queue(wiphy, &work.work); + + debugfs_enter_cancellation(file, &cancellation); + wait_for_completion(&work.completion); + debugfs_leave_cancellation(file, &cancellation); + + if (work.ret < 0) + return work.ret; + + if (WARN_ON(work.ret > bufsize)) + return -EINVAL; + + return simple_read_from_buffer(userbuf, count, ppos, buf, work.ret); +} +EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_read); + +struct debugfs_write_work { + struct wiphy_work work; + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t count, + void *data); + struct wiphy *wiphy; + struct file *file; + char *buf; + size_t count; + void *data; + ssize_t ret; + struct completion completion; +}; + +static void wiphy_locked_debugfs_write_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct debugfs_write_work *w = container_of(work, typeof(*w), work); + + w->ret = w->handler(w->wiphy, w->file, w->buf, w->count, w->data); + complete(&w->completion); +} + +static void wiphy_locked_debugfs_write_cancel(struct dentry *dentry, + void *data) +{ + struct debugfs_write_work *w = data; + + wiphy_work_cancel(w->wiphy, &w->work); + complete(&w->completion); +} + +ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, + struct file *file, char *buf, size_t bufsize, + const char __user *userbuf, size_t count, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t count, + void *data), + void *data) +{ + struct debugfs_write_work work = { + .handler = handler, + .wiphy = wiphy, + .file = file, + .buf = buf, + .count = count, + .data = data, + .ret = -ENODEV, + .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), + }; + struct debugfs_cancellation cancellation = { + .cancel = 
wiphy_locked_debugfs_write_cancel, + .cancel_data = &work, + }; + + /* mostly used for strings so enforce NUL-termination for safety */ + if (count >= bufsize) + return -EINVAL; + + memset(buf, 0, bufsize); + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + wiphy_work_init(&work.work, wiphy_locked_debugfs_write_work); + wiphy_work_queue(wiphy, &work.work); + + debugfs_enter_cancellation(file, &cancellation); + wait_for_completion(&work.completion); + debugfs_leave_cancellation(file, &cancellation); + + return work.ret; +} +EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_write); -- cgit v1.2.3 From 95ba893c9f4feb836ddce627efd0bb6af6667031 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 14 Nov 2023 13:37:09 +0100 Subject: dma-buf: fix check in dma_resv_add_fence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's valid to add the same fence multiple times to a dma-resv object and we shouldn't need one extra slot for each. Signed-off-by: Christian König Reviewed-by: Thomas Hellström Fixes: a3f7c10a269d5 ("dma-buf/dma-resv: check if the new fence is really later") Cc: stable@vger.kernel.org # v5.19+ Link: https://patchwork.freedesktop.org/patch/msgid/20231115093035.1889-1-christian.koenig@amd.com --- drivers/dma-buf/dma-resv.c | 2 +- include/linux/dma-fence.h | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 38b4110378de..eb8b733065b2 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, dma_resv_list_entry(fobj, i, obj, &old, &old_usage); if ((old->context == fence->context && old_usage >= usage && - dma_fence_is_later(fence, old)) || + dma_fence_is_later_or_same(fence, old)) || dma_fence_is_signaled(old)) { dma_resv_list_set(fobj, i, fence, usage); dma_fence_put(old); diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index ebe78bd3d121..b3772edca2e6 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -498,6 +498,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1, return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops); } +/** + * dma_fence_is_later_or_same - return true if f1 is later or same as f2 + * @f1: the first fence from the same context + * @f2: the second fence from the same context + * + * Returns true if f1 is chronologically later than f2 or the same fence. Both + * fences must be from the same context, since a seqno is not re-used across + * contexts. + */ +static inline bool dma_fence_is_later_or_same(struct dma_fence *f1, + struct dma_fence *f2) +{ + return f1 == f2 || dma_fence_is_later(f1, f2); +} + /** * dma_fence_later - return the chronologically later fence * @f1: the first fence from the same context -- cgit v1.2.3 From 4e86f32a13af1970d21be94f659cae56bbe487ee Mon Sep 17 00:00:00 2001 From: Dmitry Antipov Date: Mon, 20 Nov 2023 14:05:08 +0300 Subject: uapi: propagate __struct_group() attributes to the container union Recently the kernel test robot has reported an ARM-specific BUILD_BUG_ON() in an old and unmaintained wil6210 wireless driver. The problem comes from the structure packing rules of old ARM ABI ('-mabi=apcs-gnu'). 
For example, the following structure is packed to 18 bytes instead of 16: struct poorly_packed { unsigned int a; unsigned int b; unsigned short c; union { struct { unsigned short d; unsigned int e; } __attribute__((packed)); struct { unsigned short d; unsigned int e; } __attribute__((packed)) inner; }; } __attribute__((packed)); To fit it into 16 bytes, it's required to add the packed attribute to the container union as well: struct poorly_packed { unsigned int a; unsigned int b; unsigned short c; union { struct { unsigned short d; unsigned int e; } __attribute__((packed)); struct { unsigned short d; unsigned int e; } __attribute__((packed)) inner; } __attribute__((packed)); } __attribute__((packed)); Thanks to Andrew Pinski of the GCC team for sorting things out at https://gcc.gnu.org/pipermail/gcc/2023-November/242888.html. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311150821.cI4yciFE-lkp@intel.com Signed-off-by: Dmitry Antipov Link: https://lore.kernel.org/r/20231120110607.98956-1-dmantipov@yandex.ru Fixes: 50d7bd38c3aa ("stddef: Introduce struct_group() helper macro") Signed-off-by: Kees Cook --- include/uapi/linux/stddef.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index 5c6c4269f7ef..2ec6f35cda32 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -27,7 +27,7 @@ union { \ struct { MEMBERS } ATTRS; \ struct TAG { MEMBERS } ATTRS NAME; \ - } + } ATTRS #ifdef __cplusplus /* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */ -- cgit v1.2.3 From b9c02e1052650af56d4487efa5fade3fb70e3653 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Mon, 6 Nov 2023 12:48:27 +0100 Subject: drm/gpuvm: Fix deprecated license identifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "GPL-2.0-only" in the license header was incorrectly changed to the now deprecated "GPL-2.0". Fix. Cc: Maxime Ripard Cc: Danilo Krummrich Reported-by: David Edelsohn Closes: https://lore.kernel.org/dri-devel/5lfrhdpkwhpgzipgngojs3tyqfqbesifzu5nf4l5q3nhfdhcf2@25nmiq7tfrew/T/#m5c356d68815711eea30dd94cc6f7ea8cd4344fe3 Fixes: f7749a549b4f ("drm/gpuvm: Dual-licence the drm_gpuvm code GPL-2.0 OR MIT") Signed-off-by: Thomas Hellström Acked-by: Maxime Ripard Acked-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231106114827.62492-1-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/drm_gpuvm.c | 2 +- include/drm/drm_gpuvm.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 08c088319652..b80d4e1cc9b7 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT +// SPDX-License-Identifier: GPL-2.0-only OR MIT /* * Copyright (c) 2022 Red Hat. * diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index bdfafc4a7705..c7a0594bdab1 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ #ifndef __DRM_GPUVM_H__ #define __DRM_GPUVM_H__ -- cgit v1.2.3 From 45b3fae4675dc1d4ee2d7aefa19d85ee4f891377 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R.
Silva" Date: Sat, 25 Nov 2023 15:33:58 -0600 Subject: neighbour: Fix __randomize_layout crash in struct neighbour Previously, one-element and zero-length arrays were treated as true flexible arrays, even though they are actually "fake" flex arrays. The __randomize_layout would leave them untouched at the end of the struct, similarly to proper C99 flex-array members. However, this approach changed with commit 1ee60356c2dc ("gcc-plugins: randstruct: Only warn about true flexible arrays"). Now, only C99 flexible-array members will remain untouched at the end of the struct, while one-element and zero-length arrays will be subject to randomization. Fix a `__randomize_layout` crash in `struct neighbour` by transforming zero-length array `primary_key` into a proper C99 flexible-array member. Fixes: 1ee60356c2dc ("gcc-plugins: randstruct: Only warn about true flexible arrays") Closes: https://lore.kernel.org/linux-hardening/20231124102458.GB1503258@e124191.cambridge.arm.com/ Signed-off-by: Gustavo A. R. Silva Reviewed-by: Kees Cook Tested-by: Joey Gouly Link: https://lore.kernel.org/r/ZWJoRsJGnCPdJ3+2@work Signed-off-by: Paolo Abeni --- include/net/neighbour.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 07022bb0d44d..0d28172193fa 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -162,7 +162,7 @@ struct neighbour { struct rcu_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; - u8 primary_key[0]; + u8 primary_key[]; } __randomize_layout; struct neigh_ops { -- cgit v1.2.3 From c392cbecd8eca4c53f2bf508731257d9d0a21c2d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 27 Nov 2023 16:47:04 -0700 Subject: io_uring/kbuf: defer release of mapped buffer rings If a provided buffer ring is setup with IOU_PBUF_RING_MMAP, then the kernel allocates the memory for it and the application is expected to mmap(2) this memory. However, io_uring uses remap_pfn_range() for this operation, so we cannot rely on normal munmap/release on freeing them for us. Stash an io_buf_free entry away for each of these, if any, and provide a helper to free them post ->release(). 
Cc: stable@vger.kernel.org Fixes: c56e022c0a27 ("io_uring: add support for user mapped provided buffer ring") Reported-by: Jann Horn Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 3 +++ io_uring/io_uring.c | 2 ++ io_uring/kbuf.c | 44 +++++++++++++++++++++++++++++++++++++----- io_uring/kbuf.h | 2 ++ 4 files changed, 46 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index d3009d56af0b..805bb635cdf5 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -340,6 +340,9 @@ struct io_ring_ctx { struct list_head io_buffers_cache; + /* deferred free list, protected by ->uring_lock */ + struct hlist_head io_buf_list; + /* Keep this last, we don't need it for the fast path */ struct wait_queue_head poll_wq; struct io_restriction restrictions; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index e40b11438210..3a216f0744dd 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -325,6 +325,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->sqd_list); INIT_LIST_HEAD(&ctx->cq_overflow_list); INIT_LIST_HEAD(&ctx->io_buffers_cache); + INIT_HLIST_HEAD(&ctx->io_buf_list); io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX, sizeof(struct io_rsrc_node)); io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX, @@ -2950,6 +2951,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) ctx->mm_account = NULL; } io_rings_free(ctx); + io_kbuf_mmap_list_free(ctx); percpu_ref_exit(&ctx->refs); free_uid(ctx->user); diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index a1e4239c7d75..85e680fc74ce 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -33,6 +33,11 @@ struct io_provide_buf { __u16 bid; }; +struct io_buf_free { + struct hlist_node list; + void *mem; +}; + static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx, unsigned int bgid) { @@ -223,7 +228,10 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, if (bl->is_mapped) { i = bl->buf_ring->tail - bl->head; if (bl->is_mmap) { - folio_put(virt_to_folio(bl->buf_ring)); + /* + * io_kbuf_list_free() will free the page(s) at + * ->release() time. 
+ */ bl->buf_ring = NULL; bl->is_mmap = 0; } else if (bl->buf_nr_pages) { @@ -531,18 +539,28 @@ error_unpin: return -EINVAL; } -static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg, +static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx, + struct io_uring_buf_reg *reg, struct io_buffer_list *bl) { - gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; + struct io_buf_free *ibf; size_t ring_size; void *ptr; ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring); - ptr = (void *) __get_free_pages(gfp, get_order(ring_size)); + ptr = io_mem_alloc(ring_size); if (!ptr) return -ENOMEM; + /* Allocate and store deferred free entry */ + ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT); + if (!ibf) { + io_mem_free(ptr); + return -ENOMEM; + } + ibf->mem = ptr; + hlist_add_head(&ibf->list, &ctx->io_buf_list); + bl->buf_ring = ptr; bl->is_mapped = 1; bl->is_mmap = 1; @@ -599,7 +617,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) if (!(reg.flags & IOU_PBUF_RING_MMAP)) ret = io_pin_pbuf_ring(&reg, bl); else - ret = io_alloc_pbuf_ring(&reg, bl); + ret = io_alloc_pbuf_ring(ctx, &reg, bl); if (!ret) { bl->nr_entries = reg.ring_entries; @@ -649,3 +667,19 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid) return bl->buf_ring; } + +/* + * Called at or after ->release(), free the mmap'ed buffers that we used + * for memory mapped provided buffer rings. + */ +void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx) +{ + struct io_buf_free *ibf; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) { + hlist_del(&ibf->list); + io_mem_free(ibf->mem); + kfree(ibf); + } +} diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index f2d615236b2c..6c7646e6057c 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -51,6 +51,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags); int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); +void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx); + unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags); bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags); -- cgit v1.2.3 From fad907cffd4bde7384812cf32fcf69becab805cc Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 28 Nov 2023 20:30:26 +0800 Subject: block: move .bd_inode into 1st cacheline of block_device The .bd_inode field of block_device is used in the IO fast path of blkdev_write_iter() and blkdev_llseek(), so it is more efficient to keep it in the 1st cacheline. .bd_openers is only touched in open()/close(), and .bd_size_lock is only for updating bdev capacity, which is in the slow path too. So swap .bd_inode layout with .bd_openers & .bd_size_lock to move .bd_inode into the 1st cache line.
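One illustrative way to pin the layout assumption down at build time (a sketch, assuming 64-byte cache lines; not part of this patch):

	/* would catch a change that pushes bd_inode out of cache line 0 */
	static_assert(offsetof(struct block_device, bd_inode) < 64,
		      "bd_inode must stay in the first cache line");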
Cc: Yu Kuai Signed-off-by: Ming Lei Signed-off-by: Yu Kuai Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20231128123027.971610-2-yukuai1@huaweicloud.com Signed-off-by: Jens Axboe --- include/linux/blk_types.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index d5c5e59ddbd2..f7d40692dd94 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -49,9 +49,10 @@ struct block_device { bool bd_write_holder; bool bd_has_submit_bio; dev_t bd_dev; + struct inode *bd_inode; /* will die */ + atomic_t bd_openers; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ - struct inode * bd_inode; /* will die */ void * bd_claiming; void * bd_holder; const struct blk_holder_ops *bd_holder_ops; -- cgit v1.2.3 From 67d995e069535c32829f5d368d919063492cec6e Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Tue, 28 Nov 2023 20:30:27 +0800 Subject: block: warn once for each partition in bio_check_ro() Commit 1b0a151c10a6 ("blk-core: use pr_warn_ratelimited() in bio_check_ro()") fixed the message storm by limiting the rate; however, there will still be lots of messages in the long term. Fix it better by warning once for each partition. Signed-off-by: Yu Kuai Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20231128123027.971610-3-yukuai1@huaweicloud.com Signed-off-by: Jens Axboe --- block/blk-core.c | 14 +++++++++++--- include/linux/blk_types.h | 1 + 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index fdf25b8d6e78..2eca76ccf4ee 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -501,9 +501,17 @@ static inline void bio_check_ro(struct bio *bio) if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return; - pr_warn_ratelimited("Trying to write to read-only block-device %pg\n", - bio->bi_bdev); - /* Older lvm-tools actually trigger this */ + + if (bio->bi_bdev->bd_ro_warned) + return; + + bio->bi_bdev->bd_ro_warned = true; + /* + * Use ioctl to set underlying disk of raid/dm to read-only + * will trigger this. + */ + pr_warn("Trying to write to read-only block-device %pg\n", + bio->bi_bdev); } } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f7d40692dd94..b29ebd53417d 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -70,6 +70,7 @@ struct block_device { #ifdef CONFIG_FAIL_MAKE_REQUEST bool bd_make_it_fail; #endif + bool bd_ro_warned; /* * keep this out-of-line as it's both big and not needed in the fast * path -- cgit v1.2.3 From febab20caebac959fdc3d7520bc52de8b1184455 Mon Sep 17 00:00:00 2001 From: Wyes Karny Date: Fri, 17 Nov 2023 06:38:39 +0000 Subject: cpufreq/amd-pstate: Fix scaling_min_freq and scaling_max_freq update When amd_pstate is running, writing to scaling_min_freq and scaling_max_freq has no effect. These values are only passed to the policy level, but not to the platform level. This means that the platform does not know about the frequency limits set by the user. To fix this, update the min_perf and max_perf values at the platform level whenever the user changes the scaling_min_freq and scaling_max_freq values. Fixes: ffa5096a7c33 ("cpufreq: amd-pstate: implement Pstate EPP support for the AMD processors") Acked-by: Huang Rui Signed-off-by: Wyes Karny Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/amd-pstate.c | 60 ++++++++++++++++++++++++++++++++++---------- include/linux/amd-pstate.h | 4 +++ 2 files changed, 51 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 300f81d36291..3313d1d2c6dd 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) highest_perf = AMD_CPPC_HIGHEST_PERF(cap1); WRITE_ONCE(cpudata->highest_perf, highest_perf); - + WRITE_ONCE(cpudata->max_limit_perf, highest_perf); WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); - + WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1)); return 0; } @@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata) highest_perf = cppc_perf.highest_perf; WRITE_ONCE(cpudata->highest_perf, highest_perf); - + WRITE_ONCE(cpudata->max_limit_perf, highest_perf); WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); WRITE_ONCE(cpudata->lowest_nonlinear_perf, cppc_perf.lowest_nonlinear_perf); WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); + WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf); if (cppc_state == AMD_PSTATE_ACTIVE) return 0; @@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, u64 prev = READ_ONCE(cpudata->cppc_req_cached); u64 value = prev; + min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, + cpudata->max_limit_perf); + max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, + cpudata->max_limit_perf); des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { @@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy) return 0; } +static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) +{ + u32 max_limit_perf, min_limit_perf; + struct amd_cpudata *cpudata = policy->driver_data; + + max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); + min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); + + WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); + WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); + WRITE_ONCE(cpudata->max_limit_freq, policy->max); + WRITE_ONCE(cpudata->min_limit_freq, policy->min); + + return 0; +} + static int amd_pstate_update_freq(struct cpufreq_policy *policy, unsigned int target_freq, bool fast_switch) { @@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy, if (!cpudata->max_freq) return -ENODEV; + if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) + amd_pstate_update_min_max_limit(policy); + cap_perf = READ_ONCE(cpudata->highest_perf); min_perf = READ_ONCE(cpudata->lowest_perf); max_perf = cap_perf; @@ -534,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu, struct amd_cpudata *cpudata = policy->driver_data; unsigned int target_freq; + if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) + amd_pstate_update_min_max_limit(policy); + + cap_perf = READ_ONCE(cpudata->highest_perf); lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); max_freq = READ_ONCE(cpudata->max_freq); @@ -747,6 +775,8 @@ static int 
amd_pstate_cpu_init(struct cpufreq_policy *policy) /* Initial processor data capability frequencies */ cpudata->max_freq = max_freq; cpudata->min_freq = min_freq; + cpudata->max_limit_freq = max_freq; + cpudata->min_limit_freq = min_freq; cpudata->nominal_freq = nominal_freq; cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq; @@ -1185,16 +1215,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) return 0; } -static void amd_pstate_epp_init(unsigned int cpu) +static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) { - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); struct amd_cpudata *cpudata = policy->driver_data; - u32 max_perf, min_perf; + u32 max_perf, min_perf, min_limit_perf, max_limit_perf; u64 value; s16 epp; max_perf = READ_ONCE(cpudata->highest_perf); min_perf = READ_ONCE(cpudata->lowest_perf); + max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq); + min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq); + + max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, + cpudata->max_limit_perf); + min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, + cpudata->max_limit_perf); + + WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); + WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); value = READ_ONCE(cpudata->cppc_req_cached); @@ -1212,9 +1251,6 @@ static void amd_pstate_epp_init(unsigned int cpu) value &= ~AMD_CPPC_DES_PERF(~0L); value |= AMD_CPPC_DES_PERF(0); - if (cpudata->epp_policy == cpudata->policy) - goto skip_epp; - cpudata->epp_policy = cpudata->policy; /* Get BIOS pre-defined epp value */ @@ -1224,7 +1260,7 @@ static void amd_pstate_epp_init(unsigned int cpu) * This return value can only be negative for shared_memory * systems where EPP register read/write not supported. */ - goto skip_epp; + return; } if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) @@ -1238,8 +1274,6 @@ static void amd_pstate_epp_init(unsigned int cpu) WRITE_ONCE(cpudata->cppc_req_cached, value); amd_pstate_set_epp(cpudata, epp); -skip_epp: - cpufreq_cpu_put(policy); } static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) @@ -1254,7 +1288,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) cpudata->policy = policy->policy; - amd_pstate_epp_init(policy->cpu); + amd_pstate_epp_update_limit(policy); return 0; } diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index 446394f84606..6ad02ad9c7b4 100644 --- a/include/linux/amd-pstate.h +++ b/include/linux/amd-pstate.h @@ -70,6 +70,10 @@ struct amd_cpudata { u32 nominal_perf; u32 lowest_nonlinear_perf; u32 lowest_perf; + u32 min_limit_perf; + u32 max_limit_perf; + u32 min_limit_freq; + u32 max_limit_freq; u32 max_freq; u32 min_freq; -- cgit v1.2.3 From 8866730aed5100f06d3d965c22f1c61f74942541 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 28 Nov 2023 17:25:56 -0800 Subject: bpf, sockmap: af_unix stream sockets need to hold ref for pair sock AF_UNIX stream sockets are paired sockets. So sending on one of the pairs will look up the paired socket as part of the send operation. It is possible, however, to put just one of the pairs in a BPF map. This currently increments the refcnt on the sock in the sockmap to ensure it is not free'd by the stack before sockmap cleans up its state and stops any skbs being sent/recv'd to that socket. But we missed a case. If the peer socket is closed it will be free'd by the stack.
However, the paired socket can still be referenced from BPF sockmap side because we hold a reference there. Then if we are sending traffic through BPF sockmap to that socket it will try to dereference the free'd pair in its send logic, creating a use after free. And the following splat: [59.900375] BUG: KASAN: slab-use-after-free in sk_wake_async+0x31/0x1b0 [59.901211] Read of size 8 at addr ffff88811acbf060 by task kworker/1:2/954 [...] [59.905468] Call Trace: [59.905787] [59.906066] dump_stack_lvl+0x130/0x1d0 [59.908877] print_report+0x16f/0x740 [59.910629] kasan_report+0x118/0x160 [59.912576] sk_wake_async+0x31/0x1b0 [59.913554] sock_def_readable+0x156/0x2a0 [59.914060] unix_stream_sendmsg+0x3f9/0x12a0 [59.916398] sock_sendmsg+0x20e/0x250 [59.916854] skb_send_sock+0x236/0xac0 [59.920527] sk_psock_backlog+0x287/0xaa0 To fix, let BPF sockmap hold a refcnt on both the socket in the sockmap and its paired socket. It wasn't obvious how to contain the fix to bpf_unix logic. The primary problem with keeping this logic in bpf_unix was: In the sock close() we could handle the deref by having a close handler. But, when we are destroying the psock through a map delete operation we wouldn't have gotten any signal through the proto struct other than it being replaced. If we do the deref from the proto replace it's too early because we need to deref the sk_pair after the backlog worker has been stopped. Given all this it seems best to just cache it at the end of the psock and eat 8B for the af_unix and vsock users. Notice dgram sockets are OK because they handle locking already. Fixes: 94531cfcbe79 ("af_unix: Add unix_stream_proto for sockmap") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20231129012557.95371-2-john.fastabend@gmail.com --- include/linux/skmsg.h | 1 + include/net/af_unix.h | 1 + net/core/skmsg.c | 2 ++ net/unix/af_unix.c | 2 -- net/unix/unix_bpf.c | 5 +++++ 5 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index c1637515a8a4..c953b8c0d2f4 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -106,6 +106,7 @@ struct sk_psock { struct mutex work_mutex; struct sk_psock_work_state work_state; struct delayed_work work; + struct sock *sk_pair; struct rcu_work rwork; }; diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 824c258143a3..49c4640027d8 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -75,6 +75,7 @@ struct unix_sock { }; #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk) +#define unix_peer(sk) (unix_sk(sk)->peer) #define peer_wait peer_wq.wait diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 6c31eefbd777..93ecfceac1bc 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -826,6 +826,8 @@ static void sk_psock_destroy(struct work_struct *work) if (psock->sk_redir) sock_put(psock->sk_redir); + if (psock->sk_pair) + sock_put(psock->sk_pair); sock_put(psock->sk); kfree(psock); } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index a357dc5f2404..ac1f2bc18fc9 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -213,8 +213,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) } #endif /* CONFIG_SECURITY_NETWORK */ -#define unix_peer(sk) (unix_sk(sk)->peer) - static inline int unix_our_peer(struct sock *sk, struct sock *osk) { return unix_peer(osk) == sk; } diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c index
2f9d8271c6ec..7ea7c3a0d0d0 100644 --- a/net/unix/unix_bpf.c +++ b/net/unix/unix_bpf.c @@ -159,12 +159,17 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool re int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { + struct sock *sk_pair; + if (restore) { sk->sk_write_space = psock->saved_write_space; sock_replace_proto(sk, psock->sk_proto); return 0; } + sk_pair = unix_peer(sk); + sock_hold(sk_pair); + psock->sk_pair = sk_pair; unix_stream_bpf_check_needs_rebuild(psock->sk_proto); sock_replace_proto(sk, &unix_stream_bpf_prot); return 0; -- cgit v1.2.3 From 4ea95c04fa6b9043a1a301240996aeebe3cb28ec Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 29 Nov 2023 16:10:00 -0800 Subject: vfio: Drop vfio_file_iommu_group() stub to fudge around a KVM wart Drop the vfio_file_iommu_group() stub and instead unconditionally declare the function to fudge around a KVM wart where KVM tries to do symbol_get() on vfio_file_iommu_group() (and other VFIO symbols) even if CONFIG_VFIO=n. Ensuring the symbol is always declared fixes a PPC build error when modules are also disabled, in which case symbol_get() simply points at the address of the symbol (with some attributes shenanigans). Because KVM does symbol_get() instead of directly depending on VFIO, the lack of a fully defined symbol is not problematic (ugly, but "fine"). arch/powerpc/kvm/../../../virt/kvm/vfio.c:89:7: error: attribute declaration must precede definition [-Werror,-Wignored-attributes] fn = symbol_get(vfio_file_iommu_group); ^ include/linux/module.h:805:60: note: expanded from macro 'symbol_get' #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); }) ^ include/linux/vfio.h:294:35: note: previous definition is here static inline struct iommu_group *vfio_file_iommu_group(struct file *file) ^ arch/powerpc/kvm/../../../virt/kvm/vfio.c:89:7: error: attribute declaration must precede definition [-Werror,-Wignored-attributes] fn = symbol_get(vfio_file_iommu_group); ^ include/linux/module.h:805:65: note: expanded from macro 'symbol_get' #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); }) ^ include/linux/vfio.h:294:35: note: previous definition is here static inline struct iommu_group *vfio_file_iommu_group(struct file *file) ^ 2 errors generated. Although KVM is firmly in the wrong (there is zero reason for KVM to build virt/kvm/vfio.c when VFIO is disabled), fudge around the error in VFIO as the stub is unnecessary and doesn't serve its intended purpose (KVM is the only external user of vfio_file_iommu_group()), and there is an in-flight series to clean up the entire KVM<->VFIO interaction, i.e. fixing this in KVM would result in more churn in the long run, and the stub needs to go away regardless. 
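Condensed, the clash described above looks like this (illustrative only):

	/* stub definition, previously used when CONFIG_VFIO_GROUP=n... */
	static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
	{
		return NULL;
	}

	/* ...followed by what symbol_get() expands to with CONFIG_MODULES=n: */
	extern typeof(vfio_file_iommu_group) vfio_file_iommu_group
		__attribute__((weak, visibility("hidden")));
	/* clang: "attribute declaration must precede definition" */

Declaring the function unconditionally removes the inline stub definition, so the attribute declaration no longer follows a definition.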
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202308251949.5IiaV0sz-lkp@intel.com Closes: https://lore.kernel.org/oe-kbuild-all/202309030741.82aLACDG-lkp@intel.com Closes: https://lore.kernel.org/oe-kbuild-all/202309110914.QLH0LU6L-lkp@intel.com Link: https://lore.kernel.org/all/0-v1-08396538817d+13c5-vfio_kvm_kconfig_jgg@nvidia.com Link: https://lore.kernel.org/all/20230916003118.2540661-1-seanjc@google.com Cc: Nick Desaulniers Cc: Jason Gunthorpe Tested-by: Michael Ellerman Fixes: c1cce6d079b8 ("vfio: Compile vfio_group infrastructure optionally") Signed-off-by: Sean Christopherson Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20231130001000.543240-1-seanjc@google.com Signed-off-by: Alex Williamson --- include/linux/vfio.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 454e9295970c..a65b2513f8cd 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes, /* * External user API */ -#if IS_ENABLED(CONFIG_VFIO_GROUP) struct iommu_group *vfio_file_iommu_group(struct file *file); + +#if IS_ENABLED(CONFIG_VFIO_GROUP) bool vfio_file_is_group(struct file *file); bool vfio_file_has_dev(struct file *file, struct vfio_device *device); #else -static inline struct iommu_group *vfio_file_iommu_group(struct file *file) -{ - return NULL; -} - static inline bool vfio_file_is_group(struct file *file) { return false; -- cgit v1.2.3 From 0514f63cfff38a0dcb7ba9c5f245827edc0c5107 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 17 Nov 2023 16:44:17 -0500 Subject: Revert "drm/prime: Unexport helpers for fd/handle conversion" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 71a7974ac7019afeec105a54447ae1dc7216cbb3. These helper functions are needed for KFD to export and import DMABufs the right way without duplicating the tracking of DMABufs associated with GEM objects while ensuring that move notifier callbacks are working as intended. CC: Christian König CC: Thomas Zimmermann Acked-by: Thomas Zimmermann Acked-by: Daniel Vetter Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_prime.c | 33 ++++++++++++++++++--------------- include/drm/drm_prime.h | 7 +++++++ 2 files changed, 25 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 63b709a67471..834a5e28abbe 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) } EXPORT_SYMBOL(drm_gem_dmabuf_release); -/* +/** * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers * @dev: drm_device to import into * @file_priv: drm file-private structure @@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release); * * Returns 0 on success or a negative error code on failure. 
*/ -static int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, - uint32_t *handle) +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, + uint32_t *handle) { struct dma_buf *dma_buf; struct drm_gem_object *obj; @@ -360,6 +360,7 @@ out_put: dma_buf_put(dma_buf); return ret; } +EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, return dmabuf; } -/* +/** * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure @@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. */ -static int drm_gem_prime_handle_to_fd(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, - uint32_t flags, - int *prime_fd) +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, + int *prime_fd) { struct drm_gem_object *obj; int ret = 0; @@ -506,6 +507,7 @@ out_unlock: return ret; } +EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size); * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR * - * This is the implementation of the &drm_gem_object_funcs.export functions - * for GEM drivers using the PRIME helpers. It is used as the default for - * drivers that do not set their own. + * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers + * using the PRIME helpers. It is used as the default in + * drm_gem_prime_handle_to_fd(). */ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags) @@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev); * @dev: drm_device to import into * @dma_buf: dma-buf object to import * - * This is the implementation of the gem_prime_import functions for GEM - * drivers using the PRIME helpers. It is the default for drivers that do - * not set their own &drm_driver.gem_prime_import. + * This is the implementation of the gem_prime_import functions for GEM drivers + * using the PRIME helpers. Drivers can use this as their + * &drm_driver.gem_prime_import implementation. It is used as the default + * implementation in drm_gem_prime_fd_to_handle(). * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. 
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index a7abf9f3e697..2a1d01e5b56b 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -60,12 +60,19 @@ enum dma_data_direction; struct drm_device; struct drm_gem_object; +struct drm_file; /* core prime functions */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info); void drm_gem_dmabuf_release(struct dma_buf *dma_buf); +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); + /* helper functions for exporting */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach); -- cgit v1.2.3 From d839a656d0f3caca9f96e9bf912fd394ac6a11bc Mon Sep 17 00:00:00 2001 From: JP Kobryn Date: Fri, 1 Dec 2023 14:53:55 +0900 Subject: kprobes: consistent rcu api usage for kretprobe holder It seems that the pointer-to-kretprobe "rp" within the kretprobe_holder is RCU-managed, based on the (non-rethook) implementation of get_kretprobe(). The thought behind this patch is to make use of the RCU API where possible when accessing this pointer so that the needed barriers are always in place and to self-document the code. The __rcu annotation to "rp" allows for sparse RCU checking. Plain writes done to the "rp" pointer are changed to make use of the RCU macro for assignment. For the single read, the implementation of get_kretprobe() is simplified by making use of an RCU macro which accomplishes the same, but note that the log warning text will be more generic. I did find that there is a difference in assembly generated between the usage of the RCU macros vs without. For example, on arm64, when using rcu_assign_pointer(), the corresponding store instruction is a store-release (STLR) which has an implicit barrier. When normal assignment is done, a regular store (STR) is found. In the macro case, this seems to be a result of rcu_assign_pointer() using smp_store_release() when the value to write is not NULL. 
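The general shape of the pattern being adopted (a generic sketch of the RCU API usage, not the kprobes code itself):

	struct holder {
		struct thing __rcu *p;		/* sparse-checked RCU pointer */
	};

	static void publish(struct holder *h, struct thing *t)
	{
		rcu_assign_pointer(h->p, t);	/* store-release for t != NULL */
	}

	static struct thing *peek(struct holder *h)
	{
		/* checked load, valid under any RCU read-side section */
		return rcu_dereference_check(h->p, rcu_read_lock_any_held());
	}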
Link: https://lore.kernel.org/all/20231122132058.3359-1-inwardvessel@gmail.com/ Fixes: d741bf41d7c7 ("kprobes: Remove kretprobe hash") Cc: stable@vger.kernel.org Signed-off-by: JP Kobryn Acked-by: Masami Hiramatsu (Google) Signed-off-by: Masami Hiramatsu (Google) --- include/linux/kprobes.h | 7 ++----- kernel/kprobes.c | 4 ++-- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index ab1da3142b06..64672bace560 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -139,7 +139,7 @@ static inline bool kprobe_ftrace(struct kprobe *p) * */ struct kretprobe_holder { - struct kretprobe *rp; + struct kretprobe __rcu *rp; struct objpool_head pool; }; @@ -245,10 +245,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs, static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) { - RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), - "Kretprobe is accessed from instance under preemptive context"); - - return READ_ONCE(ri->rph->rp); + return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held()); } static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 075a632e6c7c..d5a0ee40bf66 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2252,7 +2252,7 @@ int register_kretprobe(struct kretprobe *rp) rp->rph = NULL; return -ENOMEM; } - rp->rph->rp = rp; + rcu_assign_pointer(rp->rph->rp, rp); rp->nmissed = 0; /* Establish function entry probe point */ ret = register_kprobe(&rp->kp); @@ -2300,7 +2300,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num) #ifdef CONFIG_KRETPROBE_ON_RETHOOK rethook_free(rps[i]->rh); #else - rps[i]->rph->rp = NULL; + rcu_assign_pointer(rps[i]->rph->rp, NULL); #endif } mutex_unlock(&kprobe_mutex); -- cgit v1.2.3 From a1461f1fd6cfdc4b8917c9d4a91e92605d1f28dc Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Fri, 1 Dec 2023 14:53:56 +0900 Subject: rethook: Use __rcu pointer for rethook::handler Since rethook::handler is an RCU-managed pointer whose readers must be able to notice whether the rethook is stopped (unregistered) or not, it should be an __rcu pointer and be accessed through the appropriate functions. This ensures the appropriate memory barriers are used when accessing it. OTOH, rethook::data is never changed, so we don't need to check it in get_kretprobe(). NOTE: To avoid sparse warning, rethook::handler is defined by a raw function pointer type with __rcu instead of rethook_handler_t.
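The NOTE above, spelled out (illustrative; sparse only accepts the second form):

	/* __rcu cannot reach the pointee through the typedef: */
	rethook_handler_t __rcu handler;	/* triggers a sparse warning */

	/* so the function pointer type is written out in full instead: */
	void (__rcu *handler)(struct rethook_node *, void *,
			      unsigned long, struct pt_regs *);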
Link: https://lore.kernel.org/all/170126066201.398836.837498688669005979.stgit@devnote2/ Fixes: 54ecbe6f1ed5 ("rethook: Add a generic return hook") Cc: stable@vger.kernel.org Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202311241808.rv9ceuAh-lkp@intel.com/ Tested-by: JP Kobryn Signed-off-by: Masami Hiramatsu (Google) --- include/linux/kprobes.h | 6 ++---- include/linux/rethook.h | 7 ++++++- kernel/trace/rethook.c | 23 ++++++++++++++--------- 3 files changed, 22 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 64672bace560..0ff44d6633e3 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -197,10 +197,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p); #ifdef CONFIG_KRETPROBE_ON_RETHOOK static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri) { - RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), - "Kretprobe is accessed from instance under preemptive context"); - - return (struct kretprobe *)READ_ONCE(ri->node.rethook->data); + /* rethook::data is non-changed field, so that you can access it freely. */ + return (struct kretprobe *)ri->node.rethook->data; } static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri) { diff --git a/include/linux/rethook.h b/include/linux/rethook.h index ce69b2b7bc35..ba60962805f6 100644 --- a/include/linux/rethook.h +++ b/include/linux/rethook.h @@ -28,7 +28,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long, */ struct rethook { void *data; - rethook_handler_t handler; + /* + * To avoid sparse warnings, this uses a raw function pointer with + * __rcu, instead of rethook_handler_t. But this must be same as + * rethook_handler_t. + */ + void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *); struct objpool_head pool; struct rcu_head rcu; }; diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c index 6fd7d4ecbbc6..fa03094e9e69 100644 --- a/kernel/trace/rethook.c +++ b/kernel/trace/rethook.c @@ -48,7 +48,7 @@ static void rethook_free_rcu(struct rcu_head *head) */ void rethook_stop(struct rethook *rh) { - WRITE_ONCE(rh->handler, NULL); + rcu_assign_pointer(rh->handler, NULL); } /** @@ -63,7 +63,7 @@ void rethook_stop(struct rethook *rh) */ void rethook_free(struct rethook *rh) { - WRITE_ONCE(rh->handler, NULL); + rethook_stop(rh); call_rcu(&rh->rcu, rethook_free_rcu); } @@ -82,6 +82,12 @@ static int rethook_fini_pool(struct objpool_head *head, void *context) return 0; } +static inline rethook_handler_t rethook_get_handler(struct rethook *rh) +{ + return (rethook_handler_t)rcu_dereference_check(rh->handler, + rcu_read_lock_any_held()); +} + /** * rethook_alloc() - Allocate struct rethook. * @data: a data to pass the @handler when hooking the return. 
@@ -107,7 +113,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler, return ERR_PTR(-ENOMEM); rh->data = data; - rh->handler = handler; + rcu_assign_pointer(rh->handler, handler); /* initialize the objpool for rethook nodes */ if (objpool_init(&rh->pool, num, size, GFP_KERNEL, rh, @@ -135,9 +141,10 @@ static void free_rethook_node_rcu(struct rcu_head *head) */ void rethook_recycle(struct rethook_node *node) { - lockdep_assert_preemption_disabled(); + rethook_handler_t handler; - if (likely(READ_ONCE(node->rethook->handler))) + handler = rethook_get_handler(node->rethook); + if (likely(handler)) objpool_push(node, &node->rethook->pool); else call_rcu(&node->rcu, free_rethook_node_rcu); @@ -153,9 +160,7 @@ NOKPROBE_SYMBOL(rethook_recycle); */ struct rethook_node *rethook_try_get(struct rethook *rh) { - rethook_handler_t handler = READ_ONCE(rh->handler); - - lockdep_assert_preemption_disabled(); + rethook_handler_t handler = rethook_get_handler(rh); /* Check whether @rh is going to be freed. */ if (unlikely(!handler)) @@ -300,7 +305,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs, rhn = container_of(first, struct rethook_node, llist); if (WARN_ON_ONCE(rhn->frame != frame)) break; - handler = READ_ONCE(rhn->rethook->handler); + handler = rethook_get_handler(rhn->rethook); if (handler) handler(rhn, rhn->rethook->data, correct_ret_addr, regs); -- cgit v1.2.3