Diffstat (limited to 'drivers/iommu/iommufd/device.c')
-rw-r--r--  drivers/iommu/iommufd/device.c | 801
1 file changed, 640 insertions(+), 161 deletions(-)
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index ed2937a4e196..ce78c3671539 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -4,6 +4,8 @@
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <uapi/linux/iommufd.h>
+#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
@@ -15,13 +17,127 @@ MODULE_PARM_DESC(
"Allow IOMMUFD to bind to devices even if the platform cannot isolate "
"the MSI interrupt window. Enabling this is a security weakness.");
+static void iommufd_group_release(struct kref *kref)
+{
+ struct iommufd_group *igroup =
+ container_of(kref, struct iommufd_group, ref);
+
+ WARN_ON(igroup->hwpt || !list_empty(&igroup->device_list));
+
+ xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
+ NULL, GFP_KERNEL);
+ iommu_group_put(igroup->group);
+ mutex_destroy(&igroup->lock);
+ kfree(igroup);
+}
+
+static void iommufd_put_group(struct iommufd_group *group)
+{
+ kref_put(&group->ref, iommufd_group_release);
+}
+
+static bool iommufd_group_try_get(struct iommufd_group *igroup,
+ struct iommu_group *group)
+{
+ if (!igroup)
+ return false;
+ /*
+ * Group IDs cannot be reused until the group is put back, which does
+ * not happen while we can still get an igroup pointer under the xa_lock.
+ */
+ if (WARN_ON(igroup->group != group))
+ return false;
+ return kref_get_unless_zero(&igroup->ref);
+}
+
+/*
+ * iommufd needs to store some more data for each iommu_group; we keep a
+ * parallel xarray indexed by iommu_group id to hold this instead of putting it
+ * in the core structure. To keep things simple the iommufd_group memory is
+ * unique within the iommufd_ctx. This makes it easy to check there are no
+ * memory leaks.
+ */
+static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
+ struct device *dev)
+{
+ struct iommufd_group *new_igroup;
+ struct iommufd_group *cur_igroup;
+ struct iommufd_group *igroup;
+ struct iommu_group *group;
+ unsigned int id;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return ERR_PTR(-ENODEV);
+
+ id = iommu_group_id(group);
+
+ xa_lock(&ictx->groups);
+ igroup = xa_load(&ictx->groups, id);
+ if (iommufd_group_try_get(igroup, group)) {
+ xa_unlock(&ictx->groups);
+ iommu_group_put(group);
+ return igroup;
+ }
+ xa_unlock(&ictx->groups);
+
+ new_igroup = kzalloc(sizeof(*new_igroup), GFP_KERNEL);
+ if (!new_igroup) {
+ iommu_group_put(group);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&new_igroup->ref);
+ mutex_init(&new_igroup->lock);
+ INIT_LIST_HEAD(&new_igroup->device_list);
+ new_igroup->sw_msi_start = PHYS_ADDR_MAX;
+ /* group reference moves into new_igroup */
+ new_igroup->group = group;
+
+ /*
+ * The ictx is not additionally refcounted here because all objects using
+ * an igroup must put it before their destroy completes.
+ */
+ new_igroup->ictx = ictx;
+
+ /*
+ * We dropped the lock so igroup is invalid. NULL is a safe and likely
+ * value to assume for the xa_cmpxchg algorithm.
+ */
+ cur_igroup = NULL;
+ xa_lock(&ictx->groups);
+ while (true) {
+ igroup = __xa_cmpxchg(&ictx->groups, id, cur_igroup, new_igroup,
+ GFP_KERNEL);
+ if (xa_is_err(igroup)) {
+ xa_unlock(&ictx->groups);
+ iommufd_put_group(new_igroup);
+ return ERR_PTR(xa_err(igroup));
+ }
+
+ /* new_igroup was successfully installed */
+ if (cur_igroup == igroup) {
+ xa_unlock(&ictx->groups);
+ return new_igroup;
+ }
+
+ /* Check again if the current group is any good */
+ if (iommufd_group_try_get(igroup, group)) {
+ xa_unlock(&ictx->groups);
+ iommufd_put_group(new_igroup);
+ return igroup;
+ }
+ cur_igroup = igroup;
+ }
+}
+
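
Editorial aside: the function above is an instance of a common xarray get-or-create idiom, condensed below with hypothetical my_obj/get_or_create names. This is a sketch, not part of the patch; the real code additionally routes the free path through iommufd_put_group() so the xarray slot is also cleaned on release.

/* Sketch of the lookup-or-install idiom used by iommufd_get_group(). */
struct my_obj {
	struct kref ref;
};

static struct my_obj *get_or_create(struct xarray *table, unsigned long id)
{
	struct my_obj *cur = NULL, *old, *new_obj;

	new_obj = kzalloc(sizeof(*new_obj), GFP_KERNEL);
	if (!new_obj)
		return ERR_PTR(-ENOMEM);
	kref_init(&new_obj->ref);

	xa_lock(table);
	while (true) {
		/* Install new_obj only if the slot still holds cur */
		old = __xa_cmpxchg(table, id, cur, new_obj, GFP_KERNEL);
		if (xa_is_err(old)) {
			xa_unlock(table);
			kfree(new_obj);
			return ERR_PTR(xa_err(old));
		}
		if (old == cur) {
			/* new_obj was successfully installed */
			xa_unlock(table);
			return new_obj;
		}
		if (old && kref_get_unless_zero(&old->ref)) {
			/* A live object raced in first; reuse it */
			xa_unlock(table);
			kfree(new_obj);
			return old;
		}
		/* The occupant is being destroyed; retry against it */
		cur = old;
	}
}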
void iommufd_device_destroy(struct iommufd_object *obj)
{
struct iommufd_device *idev =
container_of(obj, struct iommufd_device, obj);
iommu_device_release_dma_owner(idev->dev);
- iommu_group_put(idev->group);
+ iommufd_put_group(idev->igroup);
if (!iommufd_selftest_is_mock_dev(idev->dev))
iommufd_ctx_put(idev->ictx);
}
@@ -46,7 +162,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id)
{
struct iommufd_device *idev;
- struct iommu_group *group;
+ struct iommufd_group *igroup;
int rc;
/*
@@ -56,9 +172,29 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
return ERR_PTR(-EINVAL);
- group = iommu_group_get(dev);
- if (!group)
- return ERR_PTR(-ENODEV);
+ igroup = iommufd_get_group(ictx, dev);
+ if (IS_ERR(igroup))
+ return ERR_CAST(igroup);
+
+ /*
+ * For historical compat with VFIO the insecure interrupt path is
+ * allowed if the module parameter is set. Secure/Isolated means that a
+ * MemWr operation from the device (e.g. a simple DMA) cannot trigger an
+ * interrupt outside this iommufd context.
+ */
+ if (!iommufd_selftest_is_mock_dev(dev) &&
+ !iommu_group_has_isolated_msi(igroup->group)) {
+ if (!allow_unsafe_interrupts) {
+ rc = -EPERM;
+ goto out_group_put;
+ }
+
+ dev_warn(
+ dev,
+ "MSI interrupts are not secure, they cannot be isolated by the platform. "
+ "Check that platform features like interrupt remapping are enabled. "
+ "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
+ }
rc = iommu_device_claim_dma_owner(dev, ictx);
if (rc)
@@ -77,8 +213,8 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
/* The calling driver is a user until iommufd_device_unbind() */
refcount_inc(&idev->obj.users);
- /* group refcount moves into iommufd_device */
- idev->group = group;
+ /* igroup refcount moves into iommufd_device */
+ idev->igroup = igroup;
/*
* If the caller fails after this success it must call
@@ -93,12 +229,43 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
out_release_owner:
iommu_device_release_dma_owner(dev);
out_group_put:
- iommu_group_put(group);
+ iommufd_put_group(igroup);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
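
For orientation, a minimal caller-side sketch of the bind/unbind pairing introduced here; not part of the patch, and my_state/my_bind/my_unbind are hypothetical names:

struct my_state {
	struct iommufd_device *idev;
	u32 dev_id;
};

static int my_bind(struct my_state *st, struct iommufd_ctx *ictx,
		   struct device *dev)
{
	st->idev = iommufd_device_bind(ictx, dev, &st->dev_id);
	if (IS_ERR(st->idev))
		return PTR_ERR(st->idev);
	/* st->dev_id is the IOMMUFD_OBJ_DEVICE id to report to userspace */
	return 0;
}

static void my_unbind(struct my_state *st)
{
	/* Any prior iommufd_device_attach() must be undone first */
	iommufd_device_unbind(st->idev);
}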
/**
+ * iommufd_ctx_has_group - True if any device within the group is bound
+ * to the ictx
+ * @ictx: iommufd file descriptor
+ * @group: Pointer to a physical iommu_group struct
+ *
+ * True if any device within the group has been bound to this ictx, e.g. via
+ * iommufd_device_bind(), therefore implying ictx ownership of the group.
+ */
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group)
+{
+ struct iommufd_object *obj;
+ unsigned long index;
+
+ if (!ictx || !group)
+ return false;
+
+ xa_lock(&ictx->objects);
+ xa_for_each(&ictx->objects, index, obj) {
+ if (obj->type == IOMMUFD_OBJ_DEVICE &&
+ container_of(obj, struct iommufd_device, obj)
+ ->igroup->group == group) {
+ xa_unlock(&ictx->objects);
+ return true;
+ }
+ }
+ xa_unlock(&ictx->objects);
+ return false;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_has_group, IOMMUFD);
+
+/**
* iommufd_device_unbind - Undo iommufd_device_bind()
* @idev: Device returned by iommufd_device_bind()
*
@@ -113,10 +280,22 @@ void iommufd_device_unbind(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);
-static int iommufd_device_setup_msi(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- phys_addr_t sw_msi_start)
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev)
+{
+ return idev->ictx;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_to_ictx, IOMMUFD);
+
+u32 iommufd_device_to_id(struct iommufd_device *idev)
{
+ return idev->obj.id;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD);
+
+static int iommufd_group_setup_msi(struct iommufd_group *igroup,
+ struct iommufd_hw_pagetable *hwpt)
+{
+ phys_addr_t sw_msi_start = igroup->sw_msi_start;
int rc;
/*
@@ -143,128 +322,192 @@ static int iommufd_device_setup_msi(struct iommufd_device *idev,
*/
hwpt->msi_cookie = true;
}
-
- /*
- * For historical compat with VFIO the insecure interrupt path is
- * allowed if the module parameter is set. Insecure means that a MemWr
- * operation from the device (eg a simple DMA) cannot trigger an
- * interrupt outside this iommufd context.
- */
- if (!iommufd_selftest_is_mock_dev(idev->dev) &&
- !iommu_group_has_isolated_msi(idev->group)) {
- if (!allow_unsafe_interrupts)
- return -EPERM;
-
- dev_warn(
- idev->dev,
- "MSI interrupts are not secure, they cannot be isolated by the platform. "
- "Check that platform features like interrupt remapping are enabled. "
- "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
- }
return 0;
}
-static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt,
- struct iommu_group *group)
-{
- struct iommufd_device *cur_dev;
-
- lockdep_assert_held(&hwpt->devices_lock);
-
- list_for_each_entry(cur_dev, &hwpt->devices, devices_item)
- if (cur_dev->group == group)
- return true;
- return false;
-}
-
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
- phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
int rc;
- lockdep_assert_held(&hwpt->devices_lock);
-
- if (WARN_ON(idev->hwpt))
- return -EINVAL;
+ mutex_lock(&idev->igroup->lock);
- /*
- * Try to upgrade the domain we have, it is an iommu driver bug to
- * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail
- * enforce_cache_coherency when there are no devices attached to the
- * domain.
- */
- if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) {
- if (hwpt->domain->ops->enforce_cache_coherency)
- hwpt->enforce_cache_coherency =
- hwpt->domain->ops->enforce_cache_coherency(
- hwpt->domain);
- if (!hwpt->enforce_cache_coherency) {
- WARN_ON(list_empty(&hwpt->devices));
- return -EINVAL;
- }
+ if (idev->igroup->hwpt != NULL && idev->igroup->hwpt != hwpt) {
+ rc = -EINVAL;
+ goto err_unlock;
}
- rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
- idev->group, &sw_msi_start);
- if (rc)
- return rc;
+ /* Try to upgrade the domain we have */
+ if (idev->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (rc)
+ goto err_unlock;
+ }
- rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start);
+ rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
+ &idev->igroup->sw_msi_start);
if (rc)
- goto err_unresv;
+ goto err_unlock;
/*
- * FIXME: Hack around missing a device-centric iommu api, only attach to
- * the group once for the first device that is in the group.
+ * Only attach to the group once for the first device that is in the
+ * group. All the other devices will follow this attachment. The user
+ * should attach every device individually to the hwpt as the per-device
+ * reserved regions are only updated during individual device
+ * attachment.
*/
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
- rc = iommu_attach_group(hwpt->domain, idev->group);
+ if (list_empty(&idev->igroup->device_list)) {
+ rc = iommufd_group_setup_msi(idev->igroup, hwpt);
if (rc)
goto err_unresv;
+
+ rc = iommu_attach_group(hwpt->domain, idev->igroup->group);
+ if (rc)
+ goto err_unresv;
+ idev->igroup->hwpt = hwpt;
}
+ refcount_inc(&hwpt->obj.users);
+ list_add_tail(&idev->group_item, &idev->igroup->device_list);
+ mutex_unlock(&idev->igroup->lock);
return 0;
err_unresv:
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+err_unlock:
+ mutex_unlock(&idev->igroup->lock);
return rc;
}
-void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+struct iommufd_hw_pagetable *
+iommufd_hw_pagetable_detach(struct iommufd_device *idev)
{
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
- iommu_detach_group(hwpt->domain, idev->group);
+ struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt;
+
+ mutex_lock(&idev->igroup->lock);
+ list_del(&idev->group_item);
+ if (list_empty(&idev->igroup->device_list)) {
+ iommu_detach_group(hwpt->domain, idev->igroup->group);
+ idev->igroup->hwpt = NULL;
+ }
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+ mutex_unlock(&idev->igroup->lock);
+
+ /* Caller must destroy hwpt */
+ return hwpt;
}
-static int iommufd_device_do_attach(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt)
+static struct iommufd_hw_pagetable *
+iommufd_device_do_attach(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt)
{
int rc;
- mutex_lock(&hwpt->devices_lock);
rc = iommufd_hw_pagetable_attach(hwpt, idev);
if (rc)
- goto out_unlock;
+ return ERR_PTR(rc);
+ return NULL;
+}
- idev->hwpt = hwpt;
- refcount_inc(&hwpt->obj.users);
- list_add(&idev->devices_item, &hwpt->devices);
-out_unlock:
- mutex_unlock(&hwpt->devices_lock);
- return rc;
+static struct iommufd_hw_pagetable *
+iommufd_device_do_replace(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt)
+{
+ struct iommufd_group *igroup = idev->igroup;
+ struct iommufd_hw_pagetable *old_hwpt;
+ unsigned int num_devices = 0;
+ struct iommufd_device *cur;
+ int rc;
+
+ mutex_lock(&idev->igroup->lock);
+
+ if (igroup->hwpt == NULL) {
+ rc = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (hwpt == igroup->hwpt) {
+ mutex_unlock(&idev->igroup->lock);
+ return NULL;
+ }
+
+ /* Try to upgrade the domain we have */
+ list_for_each_entry(cur, &igroup->device_list, group_item) {
+ num_devices++;
+ if (cur->enforce_cache_coherency) {
+ rc = iommufd_hw_pagetable_enforce_cc(hwpt);
+ if (rc)
+ goto err_unlock;
+ }
+ }
+
+ old_hwpt = igroup->hwpt;
+ if (hwpt->ioas != old_hwpt->ioas) {
+ list_for_each_entry(cur, &igroup->device_list, group_item) {
+ rc = iopt_table_enforce_dev_resv_regions(
+ &hwpt->ioas->iopt, cur->dev, NULL);
+ if (rc)
+ goto err_unresv;
+ }
+ }
+
+ rc = iommufd_group_setup_msi(idev->igroup, hwpt);
+ if (rc)
+ goto err_unresv;
+
+ rc = iommu_group_replace_domain(igroup->group, hwpt->domain);
+ if (rc)
+ goto err_unresv;
+
+ if (hwpt->ioas != old_hwpt->ioas) {
+ list_for_each_entry(cur, &igroup->device_list, group_item)
+ iopt_remove_reserved_iova(&old_hwpt->ioas->iopt,
+ cur->dev);
+ }
+
+ igroup->hwpt = hwpt;
+
+ /*
+ * Move the refcounts held by the device_list to the new hwpt. Retain a
+ * refcount for this thread as the caller will free it. E.g. with three
+ * devices the new hwpt gains three users, two of old_hwpt's three are
+ * dropped here, and the caller puts the last one when destroying old_hwpt.
+ */
+ refcount_add(num_devices, &hwpt->obj.users);
+ if (num_devices > 1)
+ WARN_ON(refcount_sub_and_test(num_devices - 1,
+ &old_hwpt->obj.users));
+ mutex_unlock(&idev->igroup->lock);
+
+ /* Caller must destroy old_hwpt */
+ return old_hwpt;
+err_unresv:
+ list_for_each_entry(cur, &igroup->device_list, group_item)
+ iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
+err_unlock:
+ mutex_unlock(&idev->igroup->lock);
+ return ERR_PTR(rc);
}
+typedef struct iommufd_hw_pagetable *(*attach_fn)(
+ struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt);
+
/*
* When automatically managing the domains we search for a compatible domain in
* the iopt and if one is found use it, otherwise create a new domain.
* Automatic domain selection will never pick a manually created domain.
*/
-static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
- struct iommufd_ioas *ioas)
+static struct iommufd_hw_pagetable *
+iommufd_device_auto_get_domain(struct iommufd_device *idev,
+ struct iommufd_ioas *ioas, u32 *pt_id,
+ attach_fn do_attach)
{
+ /*
+ * iommufd_hw_pagetable_attach() is called by
+ * iommufd_hw_pagetable_alloc() in immediate attachment mode, same as
+ * iommufd_device_do_attach(). So if we are in this mode then we prefer
+ * to use the immediate_attach path as it supports drivers that can't
+ * directly allocate a domain.
+ */
+ bool immediate_attach = do_attach == iommufd_device_do_attach;
+ struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_hw_pagetable *hwpt;
- int rc;
/*
* There is no differentiation when domains are allocated, so any domain
@@ -278,50 +521,58 @@ static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
if (!iommufd_lock_obj(&hwpt->obj))
continue;
- rc = iommufd_device_do_attach(idev, hwpt);
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt)) {
+ iommufd_put_object(&hwpt->obj);
+ /*
+ * -EINVAL means the domain is incompatible with the
+ * device. Other error codes should propagate to
+ * userspace as failure. Success means the domain is
+ * attached.
+ */
+ if (PTR_ERR(destroy_hwpt) == -EINVAL)
+ continue;
+ goto out_unlock;
+ }
+ *pt_id = hwpt->obj.id;
iommufd_put_object(&hwpt->obj);
-
- /*
- * -EINVAL means the domain is incompatible with the device.
- * Other error codes should propagate to userspace as failure.
- * Success means the domain is attached.
- */
- if (rc == -EINVAL)
- continue;
goto out_unlock;
}
- hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
+ hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev,
+ immediate_attach);
if (IS_ERR(hwpt)) {
- rc = PTR_ERR(hwpt);
+ destroy_hwpt = ERR_CAST(hwpt);
goto out_unlock;
}
+
+ if (!immediate_attach) {
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt))
+ goto out_abort;
+ } else {
+ destroy_hwpt = NULL;
+ }
+
hwpt->auto_domain = true;
+ *pt_id = hwpt->obj.id;
- mutex_unlock(&ioas->mutex);
iommufd_object_finalize(idev->ictx, &hwpt->obj);
- return 0;
+ mutex_unlock(&ioas->mutex);
+ return destroy_hwpt;
+
+out_abort:
+ iommufd_object_abort_and_destroy(idev->ictx, &hwpt->obj);
out_unlock:
mutex_unlock(&ioas->mutex);
- return rc;
+ return destroy_hwpt;
}
-/**
- * iommufd_device_attach - Connect a device from an iommu_domain
- * @idev: device to attach
- * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
- * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
- *
- * This connects the device to an iommu_domain, either automatically or manually
- * selected. Once this completes the device could do DMA.
- *
- * The caller should return the resulting pt_id back to userspace.
- * This function is undone by calling iommufd_device_detach().
- */
-int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
+ attach_fn do_attach)
{
+ struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_object *pt_obj;
- int rc;
pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
if (IS_ERR(pt_obj))
@@ -332,8 +583,8 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
struct iommufd_hw_pagetable *hwpt =
container_of(pt_obj, struct iommufd_hw_pagetable, obj);
- rc = iommufd_device_do_attach(idev, hwpt);
- if (rc)
+ destroy_hwpt = (*do_attach)(idev, hwpt);
+ if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
}
@@ -341,27 +592,80 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
struct iommufd_ioas *ioas =
container_of(pt_obj, struct iommufd_ioas, obj);
- rc = iommufd_device_auto_get_domain(idev, ioas);
- if (rc)
+ destroy_hwpt = iommufd_device_auto_get_domain(idev, ioas, pt_id,
+ do_attach);
+ if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
}
default:
- rc = -EINVAL;
+ destroy_hwpt = ERR_PTR(-EINVAL);
goto out_put_pt_obj;
}
+ iommufd_put_object(pt_obj);
- refcount_inc(&idev->obj.users);
- *pt_id = idev->hwpt->obj.id;
- rc = 0;
+ /* This destruction has to be after we unlock everything */
+ if (destroy_hwpt)
+ iommufd_hw_pagetable_put(idev->ictx, destroy_hwpt);
+ return 0;
out_put_pt_obj:
iommufd_put_object(pt_obj);
- return rc;
+ return PTR_ERR(destroy_hwpt);
+}
+
+/**
+ * iommufd_device_attach - Connect a device to an iommu_domain
+ * @idev: device to attach
+ * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This connects the device to an iommu_domain, either automatically or manually
+ * selected. Once this completes the device could do DMA.
+ *
+ * The caller should return the resulting pt_id back to userspace.
+ * This function is undone by calling iommufd_device_detach().
+ */
+int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+{
+ int rc;
+
+ rc = iommufd_device_change_pt(idev, pt_id, &iommufd_device_do_attach);
+ if (rc)
+ return rc;
+
+ /*
+ * Pairs with iommufd_device_detach() - catches caller bugs attempting
+ * to destroy a device with an attachment.
+ */
+ refcount_inc(&idev->obj.users);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
/**
+ * iommufd_device_replace - Change the device's iommu_domain
+ * @idev: device to change
+ * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
+ * Output the IOMMUFD_OBJ_HW_PAGETABLE ID
+ *
+ * This is the same as::
+ *
+ * iommufd_device_detach();
+ * iommufd_device_attach();
+ *
+ * If it fails then no change is made to the attachment. The iommu driver may
+ * implement this so there is no disruption in translation. This can only be
+ * called if iommufd_device_attach() has already succeeded.
+ */
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id)
+{
+ return iommufd_device_change_pt(idev, pt_id,
+ &iommufd_device_do_replace);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_device_replace, IOMMUFD);
+
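
A hedged sketch of the resulting attach/replace/detach lifecycle from a consumer's point of view; the my_* wrappers are hypothetical:

static int my_attach(struct iommufd_device *idev, u32 *pt_id)
{
	/*
	 * *pt_id may name an IOAS or a HWPT; on success it is updated to
	 * the attached IOMMUFD_OBJ_HW_PAGETABLE id for userspace.
	 */
	return iommufd_device_attach(idev, pt_id);
}

static int my_switch(struct iommufd_device *idev, u32 *new_pt_id)
{
	/* Atomic switch-over; on failure the old attachment is untouched */
	return iommufd_device_replace(idev, new_pt_id);
}

static void my_teardown(struct iommufd_device *idev)
{
	iommufd_device_detach(idev);
	iommufd_device_unbind(idev);
}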
+/**
 * iommufd_device_detach - Disconnect a device from an iommu_domain
* @idev: device to detach
*
@@ -370,33 +674,87 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
*/
void iommufd_device_detach(struct iommufd_device *idev)
{
- struct iommufd_hw_pagetable *hwpt = idev->hwpt;
-
- mutex_lock(&hwpt->devices_lock);
- list_del(&idev->devices_item);
- idev->hwpt = NULL;
- iommufd_hw_pagetable_detach(hwpt, idev);
- mutex_unlock(&hwpt->devices_lock);
-
- if (hwpt->auto_domain)
- iommufd_object_deref_user(idev->ictx, &hwpt->obj);
- else
- refcount_dec(&hwpt->obj.users);
+ struct iommufd_hw_pagetable *hwpt;
+ hwpt = iommufd_hw_pagetable_detach(idev);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
+/*
+ * On success this refcount_inc()s the valid new_ioas and refcount_dec()s the
+ * valid cur_ioas (access->ioas). A caller that obtained new_ioas via
+ * iommufd_get_object() must still pair that with its own iommufd_put_object().
+ */
+static int iommufd_access_change_ioas(struct iommufd_access *access,
+ struct iommufd_ioas *new_ioas)
+{
+ u32 iopt_access_list_id = access->iopt_access_list_id;
+ struct iommufd_ioas *cur_ioas = access->ioas;
+ int rc;
+
+ lockdep_assert_held(&access->ioas_lock);
+
+ /* We are racing with a concurrent detach, bail */
+ if (cur_ioas != access->ioas_unpin)
+ return -EBUSY;
+
+ if (cur_ioas == new_ioas)
+ return 0;
+
+ /*
+ * Set ioas to NULL to block any further iommufd_access_pin_pages().
+ * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
+ */
+ access->ioas = NULL;
+
+ if (new_ioas) {
+ rc = iopt_add_access(&new_ioas->iopt, access);
+ if (rc) {
+ access->ioas = cur_ioas;
+ return rc;
+ }
+ refcount_inc(&new_ioas->obj.users);
+ }
+
+ if (cur_ioas) {
+ if (access->ops->unmap) {
+ mutex_unlock(&access->ioas_lock);
+ access->ops->unmap(access->data, 0, ULONG_MAX);
+ mutex_lock(&access->ioas_lock);
+ }
+ iopt_remove_access(&cur_ioas->iopt, access, iopt_access_list_id);
+ refcount_dec(&cur_ioas->obj.users);
+ }
+
+ access->ioas = new_ioas;
+ access->ioas_unpin = new_ioas;
+
+ return 0;
+}
+
+static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
+{
+ struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id);
+ int rc;
+
+ if (IS_ERR(ioas))
+ return PTR_ERR(ioas);
+ rc = iommufd_access_change_ioas(access, ioas);
+ iommufd_put_object(&ioas->obj);
+ return rc;
+}
+
void iommufd_access_destroy_object(struct iommufd_object *obj)
{
struct iommufd_access *access =
container_of(obj, struct iommufd_access, obj);
- if (access->ioas) {
- iopt_remove_access(&access->ioas->iopt, access);
- refcount_dec(&access->ioas->obj.users);
- access->ioas = NULL;
- }
+ mutex_lock(&access->ioas_lock);
+ if (access->ioas)
+ WARN_ON(iommufd_access_change_ioas(access, NULL));
+ mutex_unlock(&access->ioas_lock);
iommufd_ctx_put(access->ictx);
}
@@ -441,6 +799,7 @@ iommufd_access_create(struct iommufd_ctx *ictx,
iommufd_ctx_get(ictx);
iommufd_object_finalize(ictx, &access->obj);
*id = access->obj.id;
+ mutex_init(&access->ioas_lock);
return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
@@ -457,30 +816,49 @@ void iommufd_access_destroy(struct iommufd_access *access)
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
+void iommufd_access_detach(struct iommufd_access *access)
+{
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(!access->ioas)) {
+ mutex_unlock(&access->ioas_lock);
+ return;
+ }
+ WARN_ON(iommufd_access_change_ioas(access, NULL));
+ mutex_unlock(&access->ioas_lock);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_detach, IOMMUFD);
+
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
- struct iommufd_ioas *new_ioas;
- int rc = 0;
+ int rc;
- if (access->ioas)
+ mutex_lock(&access->ioas_lock);
+ if (WARN_ON(access->ioas)) {
+ mutex_unlock(&access->ioas_lock);
return -EINVAL;
-
- new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
- if (IS_ERR(new_ioas))
- return PTR_ERR(new_ioas);
-
- rc = iopt_add_access(&new_ioas->iopt, access);
- if (rc) {
- iommufd_put_object(&new_ioas->obj);
- return rc;
}
- iommufd_ref_to_users(&new_ioas->obj);
- access->ioas = new_ioas;
- return 0;
+ rc = iommufd_access_change_ioas_id(access, ioas_id);
+ mutex_unlock(&access->ioas_lock);
+ return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id)
+{
+ int rc;
+
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ rc = iommufd_access_change_ioas_id(access, ioas_id);
+ mutex_unlock(&access->ioas_lock);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_access_replace, IOMMUFD);
+
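
A minimal sketch of how an access user might wire this up; the my_* names are hypothetical. The unmap callback matters here because iommufd_access_change_ioas() invokes it when leaving the current IOAS:

static void my_unmap(void *data, unsigned long iova, unsigned long length)
{
	/* Drop any pins overlapping [iova, iova + length - 1] */
}

static const struct iommufd_access_ops my_access_ops = {
	.unmap = my_unmap,
};

static int my_access_switch(struct iommufd_access *access, u32 new_ioas_id)
{
	/* May invoke my_unmap(); fails with -ENOENT if nothing is attached */
	return iommufd_access_replace(access, new_ioas_id);
}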
/**
* iommufd_access_notify_unmap - Notify users of an iopt to stop using it
* @iopt: iopt to work on
@@ -531,8 +909,8 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
void iommufd_access_unpin_pages(struct iommufd_access *access,
unsigned long iova, unsigned long length)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
unsigned long last_iova;
struct iopt_area *area;
@@ -540,6 +918,17 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
return;
+ mutex_lock(&access->ioas_lock);
+ /*
+ * The driver must be doing something wrong if it calls this before an
+ * iommufd_access_attach() or after an iommufd_access_detach().
+ */
+ if (WARN_ON(!access->ioas_unpin)) {
+ mutex_unlock(&access->ioas_lock);
+ return;
+ }
+ iopt = &access->ioas_unpin->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
iopt_area_remove_access(
@@ -549,6 +938,7 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
min(last_iova, iopt_area_last_iova(area))));
WARN_ON(!iopt_area_contig_done(&iter));
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
@@ -594,8 +984,8 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
unsigned long length, struct page **out_pages,
unsigned int flags)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
unsigned long last_iova;
struct iopt_area *area;
int rc;
@@ -610,6 +1000,13 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
if (check_add_overflow(iova, length - 1, &last_iova))
return -EOVERFLOW;
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ iopt = &access->ioas->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
unsigned long last = min(last_iova, iopt_area_last_iova(area));
@@ -640,6 +1037,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
}
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return 0;
err_remove:
@@ -654,6 +1052,7 @@ err_remove:
iopt_area_last_iova(area))));
}
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
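
A hedged caller-side sketch of the pin/unpin contract, assuming a page-aligned iova for simplicity; names are hypothetical:

static int my_dma_window(struct iommufd_access *access, unsigned long iova,
			 size_t length)
{
	unsigned long npages = DIV_ROUND_UP(length, PAGE_SIZE);
	struct page **pages;
	int rc;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	rc = iommufd_access_pin_pages(access, iova, length, pages,
				      IOMMUFD_ACCESS_RW_WRITE);
	if (rc)
		goto out_free;

	/* ... program the emulated device to DMA into pages[] ... */

	/* Unpin with exactly the iova/length that was pinned */
	iommufd_access_unpin_pages(access, iova, length);
out_free:
	kfree(pages);
	return rc;
}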
@@ -673,8 +1072,8 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
void *data, size_t length, unsigned int flags)
{
- struct io_pagetable *iopt = &access->ioas->iopt;
struct iopt_area_contig_iter iter;
+ struct io_pagetable *iopt;
struct iopt_area *area;
unsigned long last_iova;
int rc;
@@ -684,6 +1083,13 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
if (check_add_overflow(iova, length - 1, &last_iova))
return -EOVERFLOW;
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return -ENOENT;
+ }
+ iopt = &access->ioas->iopt;
+
down_read(&iopt->iova_rwsem);
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
unsigned long last = min(last_iova, iopt_area_last_iova(area));
@@ -710,6 +1116,79 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
rc = -ENOENT;
err_out:
up_read(&iopt->iova_rwsem);
+ mutex_unlock(&access->ioas_lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
+
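
And a sketch of the copy-based alternative, which leaves nothing pinned afterwards; struct my_desc is hypothetical:

struct my_desc {
	__le64 addr;
	__le32 len;
};

static int my_read_desc(struct iommufd_access *access, unsigned long iova,
			struct my_desc *desc)
{
	/* flags == 0 reads; IOMMUFD_ACCESS_RW_WRITE would write instead */
	return iommufd_access_rw(access, iova, desc, sizeof(*desc), 0);
}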
+int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_hw_info *cmd = ucmd->cmd;
+ void __user *user_ptr = u64_to_user_ptr(cmd->data_uptr);
+ const struct iommu_ops *ops;
+ struct iommufd_device *idev;
+ unsigned int data_len;
+ unsigned int copy_len;
+ void *data;
+ int rc;
+
+ if (cmd->flags || cmd->__reserved)
+ return -EOPNOTSUPP;
+
+ idev = iommufd_get_device(ucmd, cmd->dev_id);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+
+ ops = dev_iommu_ops(idev->dev);
+ if (ops->hw_info) {
+ data = ops->hw_info(idev->dev, &data_len, &cmd->out_data_type);
+ if (IS_ERR(data)) {
+ rc = PTR_ERR(data);
+ goto out_put;
+ }
+
+ /*
+ * Drivers that have a hw_info callback should report a unique
+ * iommu_hw_info_type.
+ */
+ if (WARN_ON_ONCE(cmd->out_data_type ==
+ IOMMU_HW_INFO_TYPE_NONE)) {
+ rc = -ENODEV;
+ goto out_free;
+ }
+ } else {
+ cmd->out_data_type = IOMMU_HW_INFO_TYPE_NONE;
+ data_len = 0;
+ data = NULL;
+ }
+
+ copy_len = min(cmd->data_len, data_len);
+ if (copy_to_user(user_ptr, data, copy_len)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+ /*
+ * Zero the trailing bytes if the user buffer is bigger than the
+ * data size the kernel actually has.
+ */
+ if (copy_len < cmd->data_len) {
+ if (clear_user(user_ptr + copy_len, cmd->data_len - copy_len)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ }
+
+ /*
+ * We return the length the kernel supports so userspace may know what
+ * the kernel capability is. It could be larger than the input buffer.
+ */
+ cmd->data_len = data_len;
+
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+out_free:
+ kfree(data);
+out_put:
+ iommufd_put_object(&idev->obj);
+ return rc;
+}
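
From userspace this follows the usual iommufd probe pattern: the kernel copies min(user, kernel) bytes, zero-fills any user tail, and reports its full data_len back so a zero-length first call can size the buffer. A hedged sketch against the uapi added alongside this code:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int get_hw_info(int iommufd, uint32_t dev_id, void *buf,
		       uint32_t buf_len, uint32_t *out_type,
		       uint32_t *out_len)
{
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.data_len = buf_len,
		.data_uptr = (uintptr_t)buf,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;
	*out_type = cmd.out_data_type; /* IOMMU_HW_INFO_TYPE_* */
	*out_len = cmd.data_len;       /* full kernel length, may exceed buf_len */
	return 0;
}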