summary | refs | log | tree | commit | diff
path: root/virt/kvm
diff options
context:
space:
mode:
authorJason Gunthorpe <jgg@nvidia.com>2022-05-04 16:14:39 -0300
committerAlex Williamson <alex.williamson@redhat.com>2022-05-13 10:14:19 -0600
commit73b0565f19a8fbc18dcf4b9b5c26d1a47a69ab24 (patch)
treeae049dfda6e776293665c16d57e1ed05c8d19146 /virt/kvm
parentdc15f82f5329ab5daefa692bb80fb085a09ebd86 (diff)
kvm/vfio: Move KVM_DEV_VFIO_GROUP_* ioctls into functions
To make it easier to read and change in following patches.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v3-f7729924a7ea+25e33-vfio_kvm_no_group_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/vfio.c  225
1 file changed, 124 insertions(+), 101 deletions(-)
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 8fcbc50221c2..512b3ca00f3f 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -181,149 +181,171 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
mutex_unlock(&kv->lock);
}
-static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
struct vfio_group *vfio_group;
struct kvm_vfio_group *kvg;
- int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
struct fd f;
- int32_t fd;
int ret;
- switch (attr) {
- case KVM_DEV_VFIO_GROUP_ADD:
- if (get_user(fd, argp))
- return -EFAULT;
-
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
-
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
+ f = fdget(fd);
+ if (!f.file)
+ return -EBADF;
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
+ vfio_group = kvm_vfio_group_get_external_user(f.file);
+ fdput(f);
- mutex_lock(&kv->lock);
+ if (IS_ERR(vfio_group))
+ return PTR_ERR(vfio_group);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group == vfio_group) {
- mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
- return -EEXIST;
- }
- }
+ mutex_lock(&kv->lock);
- kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
- if (!kvg) {
- mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
- return -ENOMEM;
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->vfio_group == vfio_group) {
+ ret = -EEXIST;
+ goto err_unlock;
}
+ }
- list_add_tail(&kvg->node, &kv->group_list);
- kvg->vfio_group = vfio_group;
+ kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
+ if (!kvg) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
- kvm_arch_start_assignment(dev->kvm);
+ list_add_tail(&kvg->node, &kv->group_list);
+ kvg->vfio_group = vfio_group;
- mutex_unlock(&kv->lock);
+ kvm_arch_start_assignment(dev->kvm);
- kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
+ mutex_unlock(&kv->lock);
- kvm_vfio_update_coherency(dev);
+ kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
+ kvm_vfio_update_coherency(dev);
- return 0;
+ return 0;
+err_unlock:
+ mutex_unlock(&kv->lock);
+ kvm_vfio_group_put_external_user(vfio_group);
+ return ret;
+}
- case KVM_DEV_VFIO_GROUP_DEL:
- if (get_user(fd, argp))
- return -EFAULT;
+static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+{
+ struct kvm_vfio *kv = dev->private;
+ struct kvm_vfio_group *kvg;
+ struct fd f;
+ int ret;
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
+ f = fdget(fd);
+ if (!f.file)
+ return -EBADF;
- ret = -ENOENT;
+ ret = -ENOENT;
- mutex_lock(&kv->lock);
+ mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
- f.file))
- continue;
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+ f.file))
+ continue;
- list_del(&kvg->node);
- kvm_arch_end_assignment(dev->kvm);
+ list_del(&kvg->node);
+ kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm,
- kvg->vfio_group);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
#endif
- kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
- kvm_vfio_group_put_external_user(kvg->vfio_group);
- kfree(kvg);
- ret = 0;
- break;
- }
+ kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
+ kvm_vfio_group_put_external_user(kvg->vfio_group);
+ kfree(kvg);
+ ret = 0;
+ break;
+ }
- mutex_unlock(&kv->lock);
+ mutex_unlock(&kv->lock);
- fdput(f);
+ fdput(f);
- kvm_vfio_update_coherency(dev);
+ kvm_vfio_update_coherency(dev);
- return ret;
+ return ret;
+}
#ifdef CONFIG_SPAPR_TCE_IOMMU
- case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
- struct kvm_vfio_spapr_tce param;
- struct kvm_vfio *kv = dev->private;
- struct vfio_group *vfio_group;
- struct kvm_vfio_group *kvg;
- struct fd f;
- struct iommu_group *grp;
-
- if (copy_from_user(&param, (void __user *)arg,
- sizeof(struct kvm_vfio_spapr_tce)))
- return -EFAULT;
+static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
+ void __user *arg)
+{
+ struct kvm_vfio_spapr_tce param;
+ struct kvm_vfio *kv = dev->private;
+ struct vfio_group *vfio_group;
+ struct kvm_vfio_group *kvg;
+ struct fd f;
+ struct iommu_group *grp;
+ int ret;
- f = fdget(param.groupfd);
- if (!f.file)
- return -EBADF;
+ if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
+ return -EFAULT;
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
+ f = fdget(param.groupfd);
+ if (!f.file)
+ return -EBADF;
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
+ vfio_group = kvm_vfio_group_get_external_user(f.file);
+ fdput(f);
- grp = kvm_vfio_group_get_iommu_group(vfio_group);
- if (WARN_ON_ONCE(!grp)) {
- kvm_vfio_group_put_external_user(vfio_group);
- return -EIO;
- }
+ if (IS_ERR(vfio_group))
+ return PTR_ERR(vfio_group);
- ret = -ENOENT;
+ grp = kvm_vfio_group_get_iommu_group(vfio_group);
+ if (WARN_ON_ONCE(!grp)) {
+ ret = -EIO;
+ goto err_put_external;
+ }
- mutex_lock(&kv->lock);
+ ret = -ENOENT;
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group != vfio_group)
- continue;
+ mutex_lock(&kv->lock);
- ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
- param.tablefd, grp);
- break;
- }
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->vfio_group != vfio_group)
+ continue;
- mutex_unlock(&kv->lock);
+ ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
+ grp);
+ break;
+ }
- iommu_group_put(grp);
- kvm_vfio_group_put_external_user(vfio_group);
+ mutex_unlock(&kv->lock);
- return ret;
- }
-#endif /* CONFIG_SPAPR_TCE_IOMMU */
+ iommu_group_put(grp);
+err_put_external:
+ kvm_vfio_group_put_external_user(vfio_group);
+ return ret;
+}
+#endif
+
+static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
+ void __user *arg)
+{
+ int32_t __user *argp = arg;
+ int32_t fd;
+
+ switch (attr) {
+ case KVM_DEV_VFIO_GROUP_ADD:
+ if (get_user(fd, argp))
+ return -EFAULT;
+ return kvm_vfio_group_add(dev, fd);
+
+ case KVM_DEV_VFIO_GROUP_DEL:
+ if (get_user(fd, argp))
+ return -EFAULT;
+ return kvm_vfio_group_del(dev, fd);
+
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+ return kvm_vfio_group_set_spapr_tce(dev, arg);
+#endif
}
return -ENXIO;
@@ -334,7 +356,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
{
switch (attr->group) {
case KVM_DEV_VFIO_GROUP:
- return kvm_vfio_set_group(dev, attr->attr, attr->addr);
+ return kvm_vfio_set_group(dev, attr->attr,
+ u64_to_user_ptr(attr->addr));
}
return -ENXIO;