From 4f7c2e0d8765a0266b920c66ffc495fde44c1ec8 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 28 May 2019 18:56:20 +0300 Subject: thunderbolt: Make sure device runtime resume completes before taking domain lock When a device is authorized from userspace by writing to authorized attribute we first take the domain lock and then runtime resume the device in question. There are two issues with this. First is that the device connected notifications are blocked during this time which means we get them only after the authorization operation is complete. Because of this the authorization needed flag from the firmware notification is not reflecting the real authorization status anymore. So what happens is that the "authorized" keeps returning 0 even if the device was already authorized properly. Second issue is that each time the controller is runtime resumed the connection_id field of device connected notification may be different than in the previous resume. We need to use the latest connection_id otherwise the firmware rejects the authorization command. Fix these by moving runtime resume operations to happen before the domain lock is taken, and waiting for the updated device connected notification from the firmware before we allow runtime resume of a device to complete. While there add missing locking to tb_switch_nvm_read(). Fixes: 09f11b6c99fe ("thunderbolt: Take domain lock in switch sysfs attribute callbacks") Reported-by: Pengfei Xu Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 54 +++++++++++++++++++++++++++++++++++++++++++- drivers/thunderbolt/switch.c | 45 ++++++++++++++++++++++++++---------- drivers/thunderbolt/tb.h | 7 ++++++ 3 files changed, 93 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index f1c10378fa3e..40a8960b9a7e 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -484,6 +484,7 @@ static void add_switch(struct tb_switch *parent_sw, u64 route, sw->authorized = authorized; sw->security_level = security_level; sw->boot = boot; + init_completion(&sw->rpm_complete); vss = parse_intel_vss(ep_name, ep_name_size); if (vss) @@ -523,6 +524,9 @@ static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, /* This switch still exists */ sw->is_unplugged = false; + + /* Runtime resume is now complete */ + complete(&sw->rpm_complete); } static void remove_switch(struct tb_switch *sw) @@ -1770,6 +1774,32 @@ static void icm_unplug_children(struct tb_switch *sw) } } +static int complete_rpm(struct device *dev, void *data) +{ + struct tb_switch *sw = tb_to_switch(dev); + + if (sw) + complete(&sw->rpm_complete); + return 0; +} + +static void remove_unplugged_switch(struct tb_switch *sw) +{ + pm_runtime_get_sync(sw->dev.parent); + + /* + * Signal this and switches below for rpm_complete because + * tb_switch_remove() calls pm_runtime_get_sync() that then waits + * for it. 
+ */ + complete_rpm(&sw->dev, NULL); + bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); + tb_switch_remove(sw); + + pm_runtime_mark_last_busy(sw->dev.parent); + pm_runtime_put_autosuspend(sw->dev.parent); +} + static void icm_free_unplugged_children(struct tb_switch *sw) { unsigned int i; @@ -1782,7 +1812,7 @@ static void icm_free_unplugged_children(struct tb_switch *sw) port->xdomain = NULL; } else if (tb_port_has_remote(port)) { if (port->remote->sw->is_unplugged) { - tb_switch_remove(port->remote->sw); + remove_unplugged_switch(port->remote->sw); port->remote = NULL; } else { icm_free_unplugged_children(port->remote->sw); @@ -1831,6 +1861,24 @@ static int icm_runtime_suspend(struct tb *tb) return 0; } +static int icm_runtime_suspend_switch(struct tb_switch *sw) +{ + if (tb_route(sw)) + reinit_completion(&sw->rpm_complete); + return 0; +} + +static int icm_runtime_resume_switch(struct tb_switch *sw) +{ + if (tb_route(sw)) { + if (!wait_for_completion_timeout(&sw->rpm_complete, + msecs_to_jiffies(500))) { + dev_dbg(&sw->dev, "runtime resuming timed out\n"); + } + } + return 0; +} + static int icm_runtime_resume(struct tb *tb) { /* @@ -1910,6 +1958,8 @@ static const struct tb_cm_ops icm_ar_ops = { .complete = icm_complete, .runtime_suspend = icm_runtime_suspend, .runtime_resume = icm_runtime_resume, + .runtime_suspend_switch = icm_runtime_suspend_switch, + .runtime_resume_switch = icm_runtime_resume_switch, .handle_event = icm_handle_event, .get_boot_acl = icm_ar_get_boot_acl, .set_boot_acl = icm_ar_set_boot_acl, @@ -1930,6 +1980,8 @@ static const struct tb_cm_ops icm_tr_ops = { .complete = icm_complete, .runtime_suspend = icm_runtime_suspend, .runtime_resume = icm_runtime_resume, + .runtime_suspend_switch = icm_runtime_suspend_switch, + .runtime_resume_switch = icm_runtime_resume_switch, .handle_event = icm_handle_event, .get_boot_acl = icm_ar_get_boot_acl, .set_boot_acl = icm_ar_set_boot_acl, diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index c1b016574fb4..10b56c66fec3 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -239,7 +239,16 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, int ret; pm_runtime_get_sync(&sw->dev); + + if (!mutex_trylock(&sw->tb->lock)) { + ret = restart_syscall(); + goto out; + } + ret = dma_port_flash_read(sw->dma_port, offset, val, bytes); + mutex_unlock(&sw->tb->lock); + +out: pm_runtime_mark_last_busy(&sw->dev); pm_runtime_put_autosuspend(&sw->dev); @@ -1019,7 +1028,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) * the new tunnel too early. */ pci_lock_rescan_remove(); - pm_runtime_get_sync(&sw->dev); switch (val) { /* Approve switch */ @@ -1040,8 +1048,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) break; } - pm_runtime_mark_last_busy(&sw->dev); - pm_runtime_put_autosuspend(&sw->dev); pci_unlock_rescan_remove(); if (!ret) { @@ -1069,7 +1075,10 @@ static ssize_t authorized_store(struct device *dev, if (val > 2) return -EINVAL; + pm_runtime_get_sync(&sw->dev); ret = tb_switch_set_authorized(sw, val); + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); return ret ? 
ret : count; } @@ -1195,8 +1204,12 @@ static ssize_t nvm_authenticate_store(struct device *dev, bool val; int ret; - if (!mutex_trylock(&sw->tb->lock)) - return restart_syscall(); + pm_runtime_get_sync(&sw->dev); + + if (!mutex_trylock(&sw->tb->lock)) { + ret = restart_syscall(); + goto exit_rpm; + } /* If NVMem devices are not yet added */ if (!sw->nvm) { @@ -1217,13 +1230,9 @@ static ssize_t nvm_authenticate_store(struct device *dev, goto exit_unlock; } - pm_runtime_get_sync(&sw->dev); ret = nvm_validate_and_write(sw); - if (ret) { - pm_runtime_mark_last_busy(&sw->dev); - pm_runtime_put_autosuspend(&sw->dev); + if (ret) goto exit_unlock; - } sw->nvm->authenticating = true; @@ -1239,12 +1248,13 @@ static ssize_t nvm_authenticate_store(struct device *dev, } else { ret = nvm_authenticate_device(sw); } - pm_runtime_mark_last_busy(&sw->dev); - pm_runtime_put_autosuspend(&sw->dev); } exit_unlock: mutex_unlock(&sw->tb->lock); +exit_rpm: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); if (ret) return ret; @@ -1380,11 +1390,22 @@ static void tb_switch_release(struct device *dev) */ static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) { + struct tb_switch *sw = tb_to_switch(dev); + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + + if (cm_ops->runtime_suspend_switch) + return cm_ops->runtime_suspend_switch(sw); + return 0; } static int __maybe_unused tb_switch_runtime_resume(struct device *dev) { + struct tb_switch *sw = tb_to_switch(dev); + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + + if (cm_ops->runtime_resume_switch) + return cm_ops->runtime_resume_switch(sw); return 0; } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index b12c8f33d89c..6407d529871d 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -79,6 +79,8 @@ struct tb_switch_nvm { * @connection_key: Connection key used with ICM messaging * @link: Root switch link this switch is connected (ICM only) * @depth: Depth in the chain this switch is connected (ICM only) + * @rpm_complete: Completion used to wait for runtime resume to + * complete (ICM only) * * When the switch is being added or removed to the domain (other * switches) you need to have domain lock held. @@ -112,6 +114,7 @@ struct tb_switch { u8 connection_key; u8 link; u8 depth; + struct completion rpm_complete; }; /** @@ -250,6 +253,8 @@ struct tb_path { * @complete: Connection manager specific complete * @runtime_suspend: Connection manager specific runtime_suspend * @runtime_resume: Connection manager specific runtime_resume + * @runtime_suspend_switch: Runtime suspend a switch + * @runtime_resume_switch: Runtime resume a switch * @handle_event: Handle thunderbolt event * @get_boot_acl: Get boot ACL list * @set_boot_acl: Set boot ACL list @@ -270,6 +275,8 @@ struct tb_cm_ops { void (*complete)(struct tb *tb); int (*runtime_suspend)(struct tb *tb); int (*runtime_resume)(struct tb *tb); + int (*runtime_suspend_switch)(struct tb_switch *sw); + int (*runtime_resume_switch)(struct tb_switch *sw); void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type, const void *buf, size_t size); int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids); -- cgit v1.2.3 From 0d53827d7c172f1345140f7638fe658bda1bb25d Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Fri, 31 May 2019 13:31:54 +0300 Subject: thunderbolt: Implement CIO reset correctly for Titan Ridge When starting ICM firmware on Apple systems we need to perform CIO reset as part of the flow. 
However, it turns out that the reset register has changed to another location in Titan Ridge. Fix this by introducing ->cio_reset() callback with corresponding implementations for Alpine and Titan Ridge. Fixes: c4630d6ae6e3 ("thunderbolt: Start firmware on Titan Ridge Apple systems") Reported-by: Peter Bowen Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 134 +++++++++++++++++++++++++--------------------- 1 file changed, 74 insertions(+), 60 deletions(-) (limited to 'drivers') diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 40a8960b9a7e..fbdcef56a676 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -56,6 +56,7 @@ * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) * @rpm: Does the controller support runtime PM (RTD3) * @is_supported: Checks if we can support ICM on this controller + * @cio_reset: Trigger CIO reset * @get_mode: Read and return the ICM firmware mode (optional) * @get_route: Find a route string for given switch * @save_devices: Ask ICM to save devices to ACL when suspending (optional) @@ -74,6 +75,7 @@ struct icm { bool safe_mode; bool rpm; bool (*is_supported)(struct tb *tb); + int (*cio_reset)(struct tb *tb); int (*get_mode)(struct tb *tb); int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); void (*save_devices)(struct tb *tb); @@ -166,6 +168,65 @@ static inline u64 get_parent_route(u64 route) return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; } +static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) +{ + unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); + u32 cmd; + + do { + pci_read_config_dword(icm->upstream_port, + icm->vnd_cap + PCIE2CIO_CMD, &cmd); + if (!(cmd & PCIE2CIO_CMD_START)) { + if (cmd & PCIE2CIO_CMD_TIMEOUT) + break; + return 0; + } + + msleep(50); + } while (time_before(jiffies, end)); + + return -ETIMEDOUT; +} + +static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, + unsigned int port, unsigned int index, u32 *data) +{ + struct pci_dev *pdev = icm->upstream_port; + int ret, vnd_cap = icm->vnd_cap; + u32 cmd; + + cmd = index; + cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; + cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; + cmd |= PCIE2CIO_CMD_START; + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); + + ret = pci2cio_wait_completion(icm, 5000); + if (ret) + return ret; + + pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); + return 0; +} + +static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, + unsigned int port, unsigned int index, u32 data) +{ + struct pci_dev *pdev = icm->upstream_port; + int vnd_cap = icm->vnd_cap; + u32 cmd; + + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); + + cmd = index; + cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; + cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; + cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); + + return pci2cio_wait_completion(icm, 5000); +} + static bool icm_match(const struct tb_cfg_request *req, const struct ctl_pkg *pkg) { @@ -838,6 +899,11 @@ icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) } } +static int icm_tr_cio_reset(struct tb *tb) +{ + return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1)); +} + static int icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, size_t *nboot_acl, bool *rpm) @@ 
-1244,6 +1310,11 @@ static bool icm_ar_is_supported(struct tb *tb) return false; } +static int icm_ar_cio_reset(struct tb *tb) +{ + return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9)); +} + static int icm_ar_get_mode(struct tb *tb) { struct tb_nhi *nhi = tb->nhi; @@ -1481,65 +1552,6 @@ __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, return -ETIMEDOUT; } -static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) -{ - unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); - u32 cmd; - - do { - pci_read_config_dword(icm->upstream_port, - icm->vnd_cap + PCIE2CIO_CMD, &cmd); - if (!(cmd & PCIE2CIO_CMD_START)) { - if (cmd & PCIE2CIO_CMD_TIMEOUT) - break; - return 0; - } - - msleep(50); - } while (time_before(jiffies, end)); - - return -ETIMEDOUT; -} - -static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, - unsigned int port, unsigned int index, u32 *data) -{ - struct pci_dev *pdev = icm->upstream_port; - int ret, vnd_cap = icm->vnd_cap; - u32 cmd; - - cmd = index; - cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; - cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; - cmd |= PCIE2CIO_CMD_START; - pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); - - ret = pci2cio_wait_completion(icm, 5000); - if (ret) - return ret; - - pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); - return 0; -} - -static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, - unsigned int port, unsigned int index, u32 data) -{ - struct pci_dev *pdev = icm->upstream_port; - int vnd_cap = icm->vnd_cap; - u32 cmd; - - pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); - - cmd = index; - cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; - cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; - cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; - pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); - - return pci2cio_wait_completion(icm, 5000); -} - static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) { struct icm *icm = tb_priv(tb); @@ -1560,7 +1572,7 @@ static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) iowrite32(val, nhi->iobase + REG_FW_STS); /* Trigger CIO reset now */ - return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9)); + return icm->cio_reset(tb); } static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) @@ -2027,6 +2039,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; icm->is_supported = icm_ar_is_supported; + icm->cio_reset = icm_ar_cio_reset; icm->get_mode = icm_ar_get_mode; icm->get_route = icm_ar_get_route; icm->save_devices = icm_fr_save_devices; @@ -2042,6 +2055,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; icm->is_supported = icm_ar_is_supported; + icm->cio_reset = icm_tr_cio_reset; icm->get_mode = icm_ar_get_mode; icm->driver_ready = icm_tr_driver_ready; icm->device_connected = icm_tr_device_connected; -- cgit v1.2.3
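
For readers following the first patch above, the core of the fix is a completion-based handshake between runtime PM and the firmware notification handler: suspend re-arms a completion, the "device connected" event fires it, and resume waits (with a bound) before completing. The sketch below distills that pattern outside the Thunderbolt driver; the example_* names are hypothetical, and the 500 ms timeout simply mirrors the value used in the patch.

/*
 * Distilled sketch of the handshake the first patch introduces; not the
 * driver code itself.  All example_* identifiers are hypothetical.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/device.h>

struct example_device {
	struct device dev;
	struct completion rpm_complete;	/* fired by the event handler */
};

/* Called once when the device object is created. */
static void example_device_init(struct example_device *ed)
{
	init_completion(&ed->rpm_complete);
}

/* Runtime PM suspend: re-arm the completion for the next resume cycle. */
static int example_runtime_suspend(struct example_device *ed)
{
	reinit_completion(&ed->rpm_complete);
	return 0;
}

/* Firmware event handler: the updated notification has arrived. */
static void example_handle_device_connected(struct example_device *ed)
{
	complete(&ed->rpm_complete);
}

/* Runtime PM resume: wait (bounded) until the notification is in. */
static int example_runtime_resume(struct example_device *ed)
{
	if (!wait_for_completion_timeout(&ed->rpm_complete,
					 msecs_to_jiffies(500)))
		dev_dbg(&ed->dev, "runtime resume timed out\n");
	return 0;
}

The point of waiting in resume is that any caller reaching the device through pm_runtime_get_sync() afterwards sees state derived from the latest notification (including the fresh connection_id), rather than stale data from before the controller resumed.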
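
The second patch's design choice is also worth spelling out: instead of hard-coding the Alpine Ridge CIO reset register in the common firmware-reset path, the controller structure carries a per-generation ->cio_reset() callback selected at probe time. A minimal sketch of that dispatch follows, again with hypothetical example_* names; the register/bit values in the comments are the ones the patch uses.

/*
 * Distilled sketch of per-generation callback selection; not the driver
 * code itself.  All example_* identifiers are hypothetical.
 */
struct example_ctrl {
	int (*cio_reset)(struct example_ctrl *ctrl);
};

static int example_ar_cio_reset(struct example_ctrl *ctrl)
{
	/* Alpine Ridge: the patch writes BIT(9) to CIO register 0x50 here. */
	return 0;
}

static int example_tr_cio_reset(struct example_ctrl *ctrl)
{
	/* Titan Ridge: the patch writes BIT(1) to CIO register 0x777 here. */
	return 0;
}

/* Probe picks the implementation once, based on the controller generation. */
static void example_probe(struct example_ctrl *ctrl, int is_titan_ridge)
{
	ctrl->cio_reset = is_titan_ridge ? example_tr_cio_reset
					 : example_ar_cio_reset;
}

/* The common reset path no longer needs to know which hardware it runs on. */
static int example_firmware_reset(struct example_ctrl *ctrl)
{
	return ctrl->cio_reset(ctrl);
}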