 MAINTAINERS                                          |  2
 arch/arm64/kernel/vdso32/Makefile                    |  2
 arch/xtensa/include/asm/checksum.h                   |  2
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c               |  9
 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c  | 10
 drivers/gpu/drm/lima/lima_pp.c                       |  2
 drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c               |  2
 drivers/i2c/busses/i2c-cadence.c                     | 28
 drivers/i2c/busses/i2c-qcom-geni.c                   |  6
 drivers/i2c/busses/i2c-rcar.c                        |  3
 drivers/infiniband/core/cm.c                         |  2
 drivers/infiniband/core/rdma_core.c                  |  6
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c           | 34
 drivers/infiniband/hw/hns/hns_roce_mr.c              |  2
 drivers/infiniband/hw/mlx5/odp.c                     | 22
 drivers/infiniband/hw/mlx5/srq_cmd.c                 |  4
 drivers/iommu/qcom_iommu.c                           | 37
 drivers/md/dm-integrity.c                            |  4
 drivers/md/dm.c                                      | 17
 drivers/mmc/host/sdhci-of-aspeed.c                   |  2
 fs/btrfs/backref.c                                   |  1
 fs/btrfs/extent_io.c                                 |  3
 fs/btrfs/inode.c                                     | 23
 fs/btrfs/volumes.c                                   |  8
 fs/io_uring.c                                        | 61
 fs/zonefs/super.c                                    | 18
 include/linux/device-mapper.h                        |  1
 include/linux/i2c.h                                  |  2
 28 files changed, 190 insertions(+), 123 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index ef39a48f63e4..f0569cf304ca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11252,7 +11252,7 @@ S: Maintained
F: drivers/crypto/atmel-ecc.*
MICROCHIP I2C DRIVER
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
+M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91-*.c
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index d88148bef6b0..5139a5f19256 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
-CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
+CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
ifneq ($(COMPAT_GCC_TOOLCHAIN),)
CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index d8292cc9ebdf..243a5fe79d3c 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -57,7 +57,7 @@ static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
- if (access_ok(dst, len))
+ if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)
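The fix above checks access_ok(src, len) rather than access_ok(dst, len): when copying *from* user space, it is the user-supplied source pointer that must be validated before it is dereferenced. Below is a minimal userspace analogue of the idea, a copy helper that refuses to read outside a caller-declared "untrusted" window; the region, names, and sizes are invented for illustration and are not the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative "untrusted" region standing in for the user address range. */
static char untrusted[64];

/* Return 1 if [p, p + len) lies entirely inside the untrusted region,
 * mirroring what access_ok() asserts about a user pointer. */
static int range_ok(const void *p, size_t len)
{
	const char *c = p;

	if (c < untrusted || c > untrusted + sizeof(untrusted))
		return 0;
	return len <= (size_t)(untrusted + sizeof(untrusted) - c);
}

/* Copy from the untrusted region: the *source* is what must be checked,
 * which is the point of the access_ok(src, len) change above. */
static int copy_from_untrusted(void *dst, const void *src, size_t len)
{
	if (!range_ok(src, len))
		return -1;	/* reject before ever dereferencing src */
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	char out[16];

	strcpy(untrusted, "hello");
	printf("inside:  %d\n", copy_from_untrusted(out, untrusted, 6));
	printf("outside: %d\n", copy_from_untrusted(out, untrusted + 60, 16));
	return 0;
}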
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 02e6f8c4dde0..ebb8a28ff002 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -778,8 +778,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1039,8 +1038,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1637,8 +1635,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
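The three hunks above replace "while (tmp_str[0])" with a loop on the return value of strsep(). The old form re-reads the buffer head after strsep() may already have set the pointer to NULL at the end of the string, so it can dereference NULL; looping until strsep() returns NULL avoids that and handles trailing delimiters uniformly. A small standalone sketch of the pattern (glibc strsep, hypothetical input string):

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "300 600  900";	/* double space: yields an empty token */
	char *tmp = buf, *sub;
	long val[8];
	int n = 0;

	/* Loop on strsep()'s return value, as the amdgpu change does. */
	while ((sub = strsep(&tmp, " ")) != NULL) {
		if (!strlen(sub))	/* skip empty tokens between delimiters */
			continue;
		val[n++] = strtol(sub, NULL, 0);
	}

	for (int i = 0; i < n; i++)
		printf("parameter[%d] = %ld\n", i, val[i]);
	return 0;
}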
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 3da71a088b92..0ecc18b55ffb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i - 1].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
- else
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index 33f01383409c..a5c95bed08c0 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip)
int lima_pp_bcast_resume(struct lima_ip *ip)
{
+ /* PP has been reset by individual PP resume */
+ ip->data.async_reset = false;
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 557cbe5ab35f..2f2c9f0a1071 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
- if (reg & SUN4I_HDMI_HPD_HIGH) {
+ if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 4b72398af505..e4b7f2a951ad 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
/* Read data if receive data valid is set */
while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
CDNS_I2C_SR_RXDV) {
- /*
- * Clear hold bit that was set for FIFO control if
- * RX data left is less than FIFO depth, unless
- * repeated start is selected.
- */
- if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
- !id->bus_hold_flag)
- cdns_i2c_clear_bus_hold(id);
-
if (id->recv_count > 0) {
*(id->p_recv_buf)++ =
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
id->recv_count--;
id->curr_recv_count--;
+
+ /*
+ * Clear hold bit that was set for FIFO control
+ * if RX data left is less than or equal to
+ * FIFO DEPTH unless repeated start is selected
+ */
+ if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
+ !id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
+
} else {
dev_err(id->adap.dev.parent,
"xfer_size reg rollover. xfer aborted!\n");
@@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+ if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
- else
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+ if (id->send_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
- else
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
-
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 18d1e4fd4cf3..7f130829bf01 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
- geni_se_setup_m_cmd(se, I2C_READ, m_param);
if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
+ geni_se_setup_m_cmd(se, I2C_READ, m_param);
+
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
@@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
- geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
+ geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
+
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a45c4bf1ec01..2e3e1bb75013 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0d1377232933..dc0558b23158 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
+ spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
+ spin_unlock_irq(&cm.lock);
return 0;
}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 3027cd2fb247..6d3ed7c6e19e 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = attrs->ufile;
- /* alloc_commit consumes the uobj kref */
- uobj->uapi_object->type_class->alloc_commit(uobj);
-
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
spin_lock_irq(&ufile->uobjects_lock);
@@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* matches atomic_set(-1) in alloc_uobj */
atomic_set(&uobj->usecnt, 0);
+ /* alloc_commit consumes the uobj kref */
+ uobj->uapi_object->type_class->alloc_commit(uobj);
+
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
}
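The hunk above moves the ->alloc_commit() call after the list insertion and the atomic_set(): alloc_commit consumes the caller's kref, so the uobject must not be touched once it has been called. A minimal userspace sketch of that ordering rule, with an invented object type standing in for the uobject:

#include <stdio.h>
#include <stdlib.h>

/* Toy object whose last reference may be dropped by the consumer. */
struct obj {
	int refs;
	int ready;		/* stands in for the list/usecnt setup */
};

static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("freeing object (ready=%d)\n", o->ready);
		free(o);
	}
}

/* Stand-in for ->alloc_commit(): consumes the caller's reference, so the
 * object may be gone the moment this returns. */
static void commit(struct obj *o)
{
	obj_put(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refs = 1;

	/* Finish all setup while we still hold our reference ... */
	o->ready = 1;

	/* ... and only then make the call that consumes it, mirroring the
	 * reordering in rdma_alloc_commit_uobject(). */
	commit(o);
	return 0;
}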
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index dd01a51816cc..0618ced45bf8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ return IB_MTU_4096;
+
+ return attr->path_mtu;
+}
+
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
+ enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4062,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
- /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
+ mtu = get_mtu(ibqp, attr);
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+ }
+
+#define MAX_LP_MSG_LEN 65536
+ /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
- ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
+ ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
- else if (attr_mask & IB_QP_PATH_MTU)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
-
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, 0);
-
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
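The rewritten hunk programs LP_PKTN_INI as ilog2(MAX_LP_MSG_LEN / mtu), using the MTU actually selected for the QP (4096 for GSI/UD, otherwise attr->path_mtu) instead of deriving it from max_sq_inline, so that mtu * 2^LP_PKTN_INI never exceeds the 64 KiB message limit. A small standalone computation of the resulting field for each IB MTU (the byte values mirror ib_mtu_enum_to_int(); this is an illustration, not the driver code):

#include <stdio.h>

#define MAX_LP_MSG_LEN 65536	/* one message is at most 64 KiB */

/* ilog2() for the power-of-two values used here. */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Byte values behind IB_MTU_256 .. IB_MTU_4096. */
	static const unsigned int mtu[] = { 256, 512, 1024, 2048, 4096 };

	for (unsigned int i = 0; i < sizeof(mtu) / sizeof(mtu[0]); i++) {
		unsigned int ini = ilog2_u32(MAX_LP_MSG_LEN / mtu[i]);

		printf("mtu %4u -> LP_PKTN_INI = %u (mtu * 2^%u = %u bytes)\n",
		       mtu[i], ini, ini, mtu[i] << ini);
	}
	return 0;
}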
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 0e71ebee9e52..6b226a5eb7db 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
- hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097..1ab676b66894 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
+ /*
+ * All work on the prefetch list must be completed, xa_erase() prevented
+ * new work from being created.
+ */
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * At this point it is forbidden for any other thread to enter
+ * pagefault_mr() on this imr. It is already forbidden to call
+ * pagefault_mr() on an implicit child. Due to this additions to
+ * implicit_children are prevented.
+ */
+
+ /*
+ * Block destroy_unused_implicit_child_mr() from incrementing
+ * num_deferred_work.
+ */
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
- * num_deferred_work can only be incremented inside the odp_srcu, or
- * under xa_lock while the child is in the xarray. Thus at this point
- * it is only decreasing, and all work holding it is now on the wq.
+ * Wait for any concurrent destroy_unused_implicit_child_mr() to
+ * complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 6f5eadc4d183..37aaacebd3f2 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
- xa_lock(&table->array);
+ xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
- xa_unlock(&table->array);
+ xa_unlock_irq(&table->array);
return srq;
}
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c3e1fbd1988c..d176df569af8 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -65,6 +65,7 @@ struct qcom_iommu_domain {
struct mutex init_mutex; /* Protects iommu pointer */
struct iommu_domain domain;
struct qcom_iommu_dev *iommu;
+ struct iommu_fwspec *fwspec;
};
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
return dev_iommu_priv_get(dev);
}
-static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
- struct iommu_fwspec *fwspec;
- struct device *dev = cookie;
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
- fwspec = dev_iommu_fwspec_get(dev);
-
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
- struct device *dev = cookie;
- struct iommu_fwspec *fwspec;
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
- fwspec = dev_iommu_fwspec_get(dev);
-
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
- struct device *dev = cookie;
- struct iommu_fwspec *fwspec;
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
- fwspec = dev_iommu_fwspec_get(dev);
-
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
- pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
+ qcom_domain->fwspec = fwspec;
+
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 81dc5ff08909..a83a1de1e03f 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+ if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
- if (unlikely(dm_suspended(ic->ti)))
+ if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 52449afd58eb..5b9de2f71bb0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
+#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@@ -2408,6 +2409,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@@ -2766,7 +2768,9 @@ retry:
if (r)
goto out_unlock;
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
+ clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@@ -2863,7 +2867,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
+ clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@@ -3024,6 +3030,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
+static int dm_post_suspending_md(struct mapped_device *md)
+{
+ return test_bit(DMF_POST_SUSPENDING, &md->flags);
+}
+
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@@ -3040,6 +3051,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
+int dm_post_suspending(struct dm_target *ti)
+{
+ return dm_post_suspending_md(dm_table_get_md(ti->table));
+}
+EXPORT_SYMBOL_GPL(dm_post_suspending);
+
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 56912e30c47e..a1bcc0f4ba9e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
- for (div = 1; div < 256; div *= 2) {
+ for (div = 2; div < 256; div *= 2) {
if ((parent / div) <= clock)
break;
}
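The change starts the divisor search at 2 instead of 1; per this fix the controller's divider evidently cannot pass the parent clock through undivided, so the loop must never select div = 1. A standalone sketch of the selection loop and the frequencies it produces (the parent clock value is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long parent = 200000000UL;	/* hypothetical 200 MHz input */
	unsigned long requested[] = { 200000000UL, 50000000UL, 25000000UL };

	for (unsigned int i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		unsigned long clock = requested[i];
		int div;

		/* Same search as aspeed_sdhci_set_clock(): smallest power-of-two
		 * divider, starting at 2, whose output does not exceed the request. */
		for (div = 2; div < 256; div *= 2) {
			if (parent / div <= clock)
				break;
		}

		printf("requested %9lu Hz -> div %3d -> actual %9lu Hz\n",
		       clock, div, parent / div);
	}
	return 0;
}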
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index d888e71e66b6..ea10f7bc99ab 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1461,6 +1461,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
+ *roots = NULL;
return ret;
}
node = ulist_next(tmp, &uiter);
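The one-line addition nulls *roots after freeing it on the error path, so the caller is not left holding a dangling pointer it might free again. A minimal standalone sketch of the free-and-clear pattern, with an invented helper in place of btrfs_find_all_roots_safe():

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for the btrfs helper: on error it must free the result
 * it may have allocated *and* clear the caller's pointer. */
static int build_list(int **out, int fail)
{
	*out = malloc(4 * sizeof(**out));
	if (!*out)
		return -12;		/* -ENOMEM */

	if (fail) {
		free(*out);
		*out = NULL;		/* the equivalent of "*roots = NULL;" */
		return -2;		/* error path */
	}

	(*out)[0] = 42;
	return 0;
}

int main(void)
{
	int *list = NULL;
	int ret = build_list(&list, 1);

	if (ret)
		printf("error %d, list is %s\n", ret, list ? "dangling!" : "NULL");

	free(list);	/* safe: free(NULL) is a no-op, no double free */
	return 0;
}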
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 608f93438b29..60278e52c37a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1999,7 +1999,8 @@ static int __process_pages_contig(struct address_space *mapping,
if (!PageDirty(pages[i]) ||
pages[i]->mapping != mapping) {
unlock_page(pages[i]);
- put_page(pages[i]);
+ for (; i < ret; i++)
+ put_page(pages[i]);
err = -EAGAIN;
goto out;
}
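The fix changes the error path to drop the reference on the failing page and on every page after it in the batch: the batch was returned with a reference held on each entry, and the function bails out without processing the rest. A userspace analogue of that cleanup loop (toy refcount array, not the page cache):

#include <stdio.h>

#define BATCH 4

/* Toy reference counters standing in for per-page refcounts. */
static int refs[BATCH];

static void get_obj(int i)  { refs[i]++; }
static void put_obj(int i)  { refs[i]--; }

/* Process a batch that was returned with one reference held per entry.
 * If entry 'bad' is unusable, release it and everything after it. */
static int process_batch(int ret, int bad)
{
	for (int i = 0; i < ret; i++) {
		if (i == bad) {
			/* Mirror of the fixed error path: put i..ret-1. */
			for (; i < ret; i++)
				put_obj(i);
			return -11;	/* -EAGAIN */
		}
		put_obj(i);		/* normal path drops its own ref */
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < BATCH; i++)
		get_obj(i);

	process_batch(BATCH, 2);

	for (int i = 0; i < BATCH; i++)
		printf("refs[%d] = %d\n", i, refs[i]);	/* all back to 0 */
	return 0;
}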
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 43c803c16b48..6862cd7e21a9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8123,20 +8123,17 @@ again:
/*
* Qgroup reserved space handler
* Page here will be either
- * 1) Already written to disk
- * In this case, its reserved space is released from data rsv map
- * and will be freed by delayed_ref handler finally.
- * So even we call qgroup_free_data(), it won't decrease reserved
- * space.
- * 2) Not written to disk
- * This means the reserved space should be freed here. However,
- * if a truncate invalidates the page (by clearing PageDirty)
- * and the page is accounted for while allocating extent
- * in btrfs_check_data_free_space() we let delayed_ref to
- * free the entire extent.
+ * 1) Already written to disk or ordered extent already submitted
+ * Then its QGROUP_RESERVED bit in io_tree is already cleaned.
+ * Qgroup will be handled by its qgroup_record then.
+ * btrfs_qgroup_free_data() call will do nothing here.
+ *
+ * 2) Not written to disk yet
+ * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
+ * bit of its io_tree, and free the qgroup reserved data space.
+ * Since the IO will never happen for this page.
*/
- if (PageDirty(page))
- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+ btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0d6e785bcb98..f403fb1e6d37 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7052,6 +7052,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
mutex_lock(&fs_info->chunk_mutex);
/*
+ * It is possible for mount and umount to race in such a way that
+ * we execute this code path, but open_fs_devices failed to clear
+ * total_rw_bytes. We certainly want it cleared before reading the
+ * device items, so clear it here.
+ */
+ fs_info->fs_devices->total_rw_bytes = 0;
+
+ /*
* Read all device items, and then all the chunk items. All
* device items are found before any chunk item (their object id
* is smaller than the lowest possible object id for a chunk
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 74bc4a04befa..32b0064f806e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -605,6 +605,7 @@ enum {
struct async_poll {
struct io_poll_iocb poll;
+ struct io_poll_iocb *double_poll;
struct io_wq_work work;
};
@@ -4159,9 +4160,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
return false;
}
-static void io_poll_remove_double(struct io_kiocb *req)
+static void io_poll_remove_double(struct io_kiocb *req, void *data)
{
- struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
+ struct io_poll_iocb *poll = data;
lockdep_assert_held(&req->ctx->completion_lock);
@@ -4181,7 +4182,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
struct io_ring_ctx *ctx = req->ctx;
- io_poll_remove_double(req);
+ io_poll_remove_double(req, req->io);
req->poll.done = true;
io_cqring_fill_event(req, error ? error : mangle_poll(mask));
io_commit_cqring(ctx);
@@ -4224,21 +4225,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
int sync, void *key)
{
struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
+ struct io_poll_iocb *poll = req->apoll->double_poll;
__poll_t mask = key_to_poll(key);
/* for instances that support it check for an event match first: */
if (mask && !(mask & poll->events))
return 0;
- if (req->poll.head) {
+ if (poll && poll->head) {
bool done;
- spin_lock(&req->poll.head->lock);
- done = list_empty(&req->poll.wait.entry);
+ spin_lock(&poll->head->lock);
+ done = list_empty(&poll->wait.entry);
if (!done)
- list_del_init(&req->poll.wait.entry);
- spin_unlock(&req->poll.head->lock);
+ list_del_init(&poll->wait.entry);
+ spin_unlock(&poll->head->lock);
if (!done)
__io_async_wake(req, poll, mask, io_poll_task_func);
}
@@ -4258,7 +4259,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
}
static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
- struct wait_queue_head *head)
+ struct wait_queue_head *head,
+ struct io_poll_iocb **poll_ptr)
{
struct io_kiocb *req = pt->req;
@@ -4269,7 +4271,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
*/
if (unlikely(poll->head)) {
/* already have a 2nd entry, fail a third attempt */
- if (req->io) {
+ if (*poll_ptr) {
pt->error = -EINVAL;
return;
}
@@ -4281,7 +4283,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
refcount_inc(&req->refs);
poll->wait.private = req;
- req->io = (void *) poll;
+ *poll_ptr = poll;
}
pt->error = 0;
@@ -4293,8 +4295,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
struct poll_table_struct *p)
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+ struct async_poll *apoll = pt->req->apoll;
- __io_queue_proc(&pt->req->apoll->poll, pt, head);
+ __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
@@ -4344,11 +4347,13 @@ static void io_async_task_func(struct callback_head *cb)
}
}
+ io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
/* restore ->work in case we need to retry again */
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&req->work, &apoll->work, sizeof(req->work));
+ kfree(apoll->double_poll);
kfree(apoll);
if (!canceled) {
@@ -4436,7 +4441,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
struct async_poll *apoll;
struct io_poll_table ipt;
__poll_t mask, ret;
- bool had_io;
if (!req->file || !file_can_poll(req->file))
return false;
@@ -4448,11 +4452,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (unlikely(!apoll))
return false;
+ apoll->double_poll = NULL;
req->flags |= REQ_F_POLLED;
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&apoll->work, &req->work, sizeof(req->work));
- had_io = req->io != NULL;
io_get_req_task(req);
req->apoll = apoll;
@@ -4470,13 +4474,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
io_async_wake);
if (ret) {
- ipt.error = 0;
- /* only remove double add if we did it here */
- if (!had_io)
- io_poll_remove_double(req);
+ io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&req->work, &apoll->work, sizeof(req->work));
+ kfree(apoll->double_poll);
kfree(apoll);
return false;
}
@@ -4507,11 +4509,13 @@ static bool io_poll_remove_one(struct io_kiocb *req)
bool do_complete;
if (req->opcode == IORING_OP_POLL_ADD) {
- io_poll_remove_double(req);
+ io_poll_remove_double(req, req->io);
do_complete = __io_poll_remove_one(req, &req->poll);
} else {
struct async_poll *apoll = req->apoll;
+ io_poll_remove_double(req, apoll->double_poll);
+
/* non-poll requests have submit ref still */
do_complete = __io_poll_remove_one(req, &apoll->poll);
if (do_complete) {
@@ -4524,6 +4528,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
if (req->flags & REQ_F_WORK_INITIALIZED)
memcpy(&req->work, &apoll->work,
sizeof(req->work));
+ kfree(apoll->double_poll);
kfree(apoll);
}
}
@@ -4624,7 +4629,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
- __io_queue_proc(&pt->req->poll, pt, head);
+ __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
}
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -4732,7 +4737,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->buf_index || sqe->len)
return -EINVAL;
req->timeout.addr = READ_ONCE(sqe->addr);
@@ -4910,8 +4917,9 @@ static int io_async_cancel_prep(struct io_kiocb *req,
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
- sqe->cancel_flags)
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
return -EINVAL;
req->cancel.addr = READ_ONCE(sqe->addr);
@@ -4929,7 +4937,9 @@ static int io_async_cancel(struct io_kiocb *req)
static int io_files_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
- if (sqe->flags || sqe->ioprio || sqe->rw_flags)
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->rw_flags)
return -EINVAL;
req->files_update.offset = READ_ONCE(sqe->off);
@@ -5720,6 +5730,7 @@ fail_req:
* Never try inline submit of IOSQE_ASYNC is set, go straight
* to async execution.
*/
+ io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_CONCURRENT;
io_queue_async_work(req);
} else {
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 07bc42d62673..abfb17f88f9a 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -607,14 +607,14 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
int nr_pages;
ssize_t ret;
- nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
- if (!nr_pages)
- return 0;
-
max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);
+ nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+ if (!nr_pages)
+ return 0;
+
bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
if (!bio)
return -ENOMEM;
@@ -1119,7 +1119,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
char *file_name;
struct dentry *dir;
unsigned int n = 0;
- int ret = -ENOMEM;
+ int ret;
/* If the group is empty, there is nothing to do */
if (!zd->nr_zones[type])
@@ -1135,8 +1135,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
zgroup_name = "seq";
dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
- if (!dir)
+ if (!dir) {
+ ret = -ENOMEM;
goto free;
+ }
/*
* The first zone contains the super block: skip it.
@@ -1174,8 +1176,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
* Use the file number within its group as file name.
*/
snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
- if (!zonefs_create_inode(dir, file_name, zone, type))
+ if (!zonefs_create_inode(dir, file_name, zone, type)) {
+ ret = -ENOMEM;
goto free;
+ }
n++;
}
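In the first zonefs hunk above, the iterator is now truncated to the zone-append limit before the page count is taken (and the function returns early only if nothing is left after truncation); counting pages first sizes the bio for data that may never be submitted. A standalone sketch of that ordering (page size and limits are illustrative):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long max = 64 * 1024;		/* hypothetical append limit */
	unsigned long lengths[] = { 256 * 1024, 8 * 1024, 0 };

	for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned long len = lengths[i];
		unsigned long nr_pages;

		/* Truncate first, as the fixed code does ... */
		if (len > max)
			len = max;

		/* ... then size the bio from what will actually be written. */
		nr_pages = DIV_ROUND_UP(len, PAGE_SIZE);
		if (!nr_pages) {
			printf("len %7lu -> nothing to do\n", lengths[i]);
			continue;
		}

		printf("len %7lu -> write %6lu bytes in %2lu pages\n",
		       lengths[i], len, nr_pages);
	}
	return 0;
}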
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8750f2dc5613..73dec4b5d5be 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -426,6 +426,7 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b8b8963f8bb9..ee328cf80bd9 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -56,7 +56,7 @@ struct property_entry;
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
- * @count must be be less than 64k since msg.len is u16.
+ * @count must be less than 64k since msg.len is u16.
*/
int i2c_transfer_buffer_flags(const struct i2c_client *client,
char *buf, int count, u16 flags);