Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r-- | drivers/media/v4l2-core/Makefile               |   1 |
-rw-r--r-- | drivers/media/v4l2-core/tuner-core.c           |   2 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-async.c           | 523 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-clk.c             |   3 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-compat-ioctl32.c  |  13 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-ctrls.c           |  22 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-flash-led-class.c | 139 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-fwnode.c          | 843 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-ioctl.c           |  21 |
-rw-r--r-- | drivers/media/v4l2-core/v4l2-trace.c           |   1 |
-rw-r--r-- | drivers/media/v4l2-core/vb2-trace.c            |   1 |
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-core.c       |  27 |
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-contig.c |   9 |
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-dma-sg.c     |   8 |
-rw-r--r-- | drivers/media/v4l2-core/videobuf2-vmalloc.c    |   8 |
15 files changed, 1271 insertions, 350 deletions
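
The patch below reworks the V4L2 async framework: sub-device matching switches to plain fwnode comparison, notifiers gain bound/unbind/complete operations and may be registered by sub-device drivers themselves (forming notifier trees), and v4l2-fwnode.c grows helpers that build async notifiers from fwnode graph endpoints and firmware references. As a rough illustration only — not part of the patch, every "my_"-prefixed name is hypothetical, and the operations structure name is taken from the matching v4l2-async header rather than from this diff — a bridge driver might use the new helpers along these lines:

	#include <linux/platform_device.h>
	#include <linux/slab.h>
	#include <media/v4l2-async.h>
	#include <media/v4l2-device.h>
	#include <media/v4l2-fwnode.h>

	struct my_bridge {
		struct v4l2_device v4l2_dev;
		struct v4l2_async_notifier notifier;
	};

	static int my_parse_endpoint(struct device *dev,
				     struct v4l2_fwnode_endpoint *vep,
				     struct v4l2_async_subdev *asd)
	{
		/* Returning -ENOTCONN tells the core to skip this endpoint. */
		return vep->base.port > 1 ? -ENOTCONN : 0;
	}

	static int my_complete(struct v4l2_async_notifier *notifier)
	{
		/* All async sub-devices are bound; register device nodes here. */
		return 0;
	}

	/* Hooks invoked by the core through notifier->ops (see the diff). */
	static const struct v4l2_async_notifier_operations my_notifier_ops = {
		.complete = my_complete,
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_bridge *bridge;
		int ret;

		bridge = devm_kzalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
		if (!bridge)
			return -ENOMEM;

		ret = v4l2_device_register(&pdev->dev, &bridge->v4l2_dev);
		if (ret)
			return ret;

		/* Turn each connected remote fwnode endpoint into an async subdev. */
		ret = v4l2_async_notifier_parse_fwnode_endpoints(
			&pdev->dev, &bridge->notifier,
			sizeof(struct v4l2_async_subdev), my_parse_endpoint);
		if (ret < 0)
			goto err_v4l2;

		bridge->notifier.ops = &my_notifier_ops;

		ret = v4l2_async_notifier_register(&bridge->v4l2_dev,
						   &bridge->notifier);
		if (ret < 0)
			goto err_cleanup;

		return 0;

	err_cleanup:
		v4l2_async_notifier_cleanup(&bridge->notifier);
	err_v4l2:
		v4l2_device_unregister(&bridge->v4l2_dev);
		return ret;
	}

On teardown (or probe failure, as above), v4l2_async_notifier_unregister() followed by v4l2_async_notifier_cleanup() drops the fwnode references and frees the subdevs array allocated by the parsing helpers. Sensor drivers get a shorter path through v4l2_async_register_subdev_sensor_common(), also added in this diff.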
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 098ad5fd5231..77303286aef7 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Makefile for the V4L2 core # diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index e48b7c032c95..8db45dfc271b 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -43,8 +43,6 @@ #define UNSET (-1U) -#define PREFIX (t->i2c->dev.driver->name) - /* * Driver modprobe parameters */ diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index 851f128eba22..a7c3464976f2 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -22,8 +22,37 @@ #include <media/v4l2-async.h> #include <media/v4l2-device.h> +#include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> +static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + if (!n->ops || !n->ops->bound) + return 0; + + return n->ops->bound(n, subdev, asd); +} + +static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + if (!n->ops || !n->ops->unbind) + return; + + n->ops->unbind(n, subdev, asd); +} + +static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n) +{ + if (!n->ops || !n->ops->complete) + return 0; + + return n->ops->complete(n); +} + static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { #if IS_ENABLED(CONFIG_I2C) @@ -44,12 +73,7 @@ static bool match_devname(struct v4l2_subdev *sd, static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { - if (!is_of_node(sd->fwnode) || !is_of_node(asd->match.fwnode.fwnode)) - return sd->fwnode == asd->match.fwnode.fwnode; - - return !of_node_cmp(of_node_full_name(to_of_node(sd->fwnode)), - of_node_full_name( - to_of_node(asd->match.fwnode.fwnode))); + return sd->fwnode == asd->match.fwnode.fwnode; } static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) @@ -65,8 +89,8 @@ static LIST_HEAD(subdev_list); static LIST_HEAD(notifier_list); static DEFINE_MUTEX(list_lock); -static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd) +static struct v4l2_async_subdev *v4l2_async_find_match( + struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd) { bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *); struct v4l2_async_subdev *asd; @@ -100,22 +124,96 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier * return NULL; } -static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_subdev *asd) +/* Find the sub-device notifier registered by a sub-device driver. */ +static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier( + struct v4l2_subdev *sd) { - int ret; + struct v4l2_async_notifier *n; - if (notifier->bound) { - ret = notifier->bound(notifier, sd, asd); - if (ret < 0) - return ret; + list_for_each_entry(n, ¬ifier_list, list) + if (n->sd == sd) + return n; + + return NULL; +} + +/* Get v4l2_device related to the notifier if one can be found. 
*/ +static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev( + struct v4l2_async_notifier *notifier) +{ + while (notifier->parent) + notifier = notifier->parent; + + return notifier->v4l2_dev; +} + +/* + * Return true if all child sub-device notifiers are complete, false otherwise. + */ +static bool v4l2_async_notifier_can_complete( + struct v4l2_async_notifier *notifier) +{ + struct v4l2_subdev *sd; + + if (!list_empty(¬ifier->waiting)) + return false; + + list_for_each_entry(sd, ¬ifier->done, async_list) { + struct v4l2_async_notifier *subdev_notifier = + v4l2_async_find_subdev_notifier(sd); + + if (subdev_notifier && + !v4l2_async_notifier_can_complete(subdev_notifier)) + return false; } - ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd); + return true; +} + +/* + * Complete the master notifier if possible. This is done when all async + * sub-devices have been bound; v4l2_device is also available then. + */ +static int v4l2_async_notifier_try_complete( + struct v4l2_async_notifier *notifier) +{ + /* Quick check whether there are still more sub-devices here. */ + if (!list_empty(¬ifier->waiting)) + return 0; + + /* Check the entire notifier tree; find the root notifier first. */ + while (notifier->parent) + notifier = notifier->parent; + + /* This is root if it has v4l2_dev. */ + if (!notifier->v4l2_dev) + return 0; + + /* Is everything ready? */ + if (!v4l2_async_notifier_can_complete(notifier)) + return 0; + + return v4l2_async_notifier_call_complete(notifier); +} + +static int v4l2_async_notifier_try_all_subdevs( + struct v4l2_async_notifier *notifier); + +static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier, + struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct v4l2_async_subdev *asd) +{ + struct v4l2_async_notifier *subdev_notifier; + int ret; + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) + return ret; + + ret = v4l2_async_notifier_call_bound(notifier, sd, asd); if (ret < 0) { - if (notifier->unbind) - notifier->unbind(notifier, sd, asd); + v4l2_device_unregister_subdev(sd); return ret; } @@ -127,8 +225,55 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier, /* Move from the global subdevice list to notifier's done */ list_move(&sd->async_list, ¬ifier->done); - if (list_empty(¬ifier->waiting) && notifier->complete) - return notifier->complete(notifier); + /* + * See if the sub-device has a notifier. If not, return here. + */ + subdev_notifier = v4l2_async_find_subdev_notifier(sd); + if (!subdev_notifier || subdev_notifier->parent) + return 0; + + /* + * Proceed with checking for the sub-device notifier's async + * sub-devices, and return the result. The error will be handled by the + * caller. + */ + subdev_notifier->parent = notifier; + + return v4l2_async_notifier_try_all_subdevs(subdev_notifier); +} + +/* Test all async sub-devices in a notifier for a match. */ +static int v4l2_async_notifier_try_all_subdevs( + struct v4l2_async_notifier *notifier) +{ + struct v4l2_device *v4l2_dev = + v4l2_async_notifier_find_v4l2_dev(notifier); + struct v4l2_subdev *sd; + + if (!v4l2_dev) + return 0; + +again: + list_for_each_entry(sd, &subdev_list, async_list) { + struct v4l2_async_subdev *asd; + int ret; + + asd = v4l2_async_find_match(notifier, sd); + if (!asd) + continue; + + ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd); + if (ret < 0) + return ret; + + /* + * v4l2_async_match_notify() may lead to registering a + * new notifier and thus changing the async subdevs + * list. 
In order to proceed safely from here, restart + * parsing the list from the beginning. + */ + goto again; + } return 0; } @@ -139,24 +284,107 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd) /* Subdevice driver will reprobe and put the subdev back onto the list */ list_del_init(&sd->async_list); sd->asd = NULL; - sd->dev = NULL; } -int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev, - struct v4l2_async_notifier *notifier) +/* Unbind all sub-devices in the notifier tree. */ +static void v4l2_async_notifier_unbind_all_subdevs( + struct v4l2_async_notifier *notifier) { struct v4l2_subdev *sd, *tmp; + + list_for_each_entry_safe(sd, tmp, ¬ifier->done, async_list) { + struct v4l2_async_notifier *subdev_notifier = + v4l2_async_find_subdev_notifier(sd); + + if (subdev_notifier) + v4l2_async_notifier_unbind_all_subdevs(subdev_notifier); + + v4l2_async_notifier_call_unbind(notifier, sd, sd->asd); + v4l2_async_cleanup(sd); + + list_move(&sd->async_list, &subdev_list); + } + + notifier->parent = NULL; +} + +/* See if an fwnode can be found in a notifier's lists. */ +static bool __v4l2_async_notifier_fwnode_has_async_subdev( + struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode) +{ + struct v4l2_async_subdev *asd; + struct v4l2_subdev *sd; + + list_for_each_entry(asd, ¬ifier->waiting, list) { + if (asd->match_type != V4L2_ASYNC_MATCH_FWNODE) + continue; + + if (asd->match.fwnode.fwnode == fwnode) + return true; + } + + list_for_each_entry(sd, ¬ifier->done, async_list) { + if (WARN_ON(!sd->asd)) + continue; + + if (sd->asd->match_type != V4L2_ASYNC_MATCH_FWNODE) + continue; + + if (sd->asd->match.fwnode.fwnode == fwnode) + return true; + } + + return false; +} + +/* + * Find out whether an async sub-device was set up for an fwnode already or + * whether it exists in a given notifier before @this_index. + */ +static bool v4l2_async_notifier_fwnode_has_async_subdev( + struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode, + unsigned int this_index) +{ + unsigned int j; + + lockdep_assert_held(&list_lock); + + /* Check that an fwnode is not being added more than once. */ + for (j = 0; j < this_index; j++) { + struct v4l2_async_subdev *asd = notifier->subdevs[this_index]; + struct v4l2_async_subdev *other_asd = notifier->subdevs[j]; + + if (other_asd->match_type == V4L2_ASYNC_MATCH_FWNODE && + asd->match.fwnode.fwnode == + other_asd->match.fwnode.fwnode) + return true; + } + + /* Check than an fwnode did not exist in other notifiers. */ + list_for_each_entry(notifier, ¬ifier_list, list) + if (__v4l2_async_notifier_fwnode_has_async_subdev( + notifier, fwnode)) + return true; + + return false; +} + +static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier) +{ + struct device *dev = + notifier->v4l2_dev ? 
notifier->v4l2_dev->dev : NULL; struct v4l2_async_subdev *asd; + int ret; int i; - if (!v4l2_dev || !notifier->num_subdevs || - notifier->num_subdevs > V4L2_MAX_SUBDEVS) + if (notifier->num_subdevs > V4L2_MAX_SUBDEVS) return -EINVAL; - notifier->v4l2_dev = v4l2_dev; INIT_LIST_HEAD(¬ifier->waiting); INIT_LIST_HEAD(¬ifier->done); + mutex_lock(&list_lock); + for (i = 0; i < notifier->num_subdevs; i++) { asd = notifier->subdevs[i]; @@ -164,32 +392,32 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev, case V4L2_ASYNC_MATCH_CUSTOM: case V4L2_ASYNC_MATCH_DEVNAME: case V4L2_ASYNC_MATCH_I2C: + break; case V4L2_ASYNC_MATCH_FWNODE: + if (v4l2_async_notifier_fwnode_has_async_subdev( + notifier, asd->match.fwnode.fwnode, i)) { + dev_err(dev, + "fwnode has already been registered or in notifier's subdev list\n"); + ret = -EEXIST; + goto err_unlock; + } break; default: - dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL, - "Invalid match type %u on %p\n", + dev_err(dev, "Invalid match type %u on %p\n", asd->match_type, asd); - return -EINVAL; + ret = -EINVAL; + goto err_unlock; } list_add_tail(&asd->list, ¬ifier->waiting); } - mutex_lock(&list_lock); + ret = v4l2_async_notifier_try_all_subdevs(notifier); + if (ret < 0) + goto err_unbind; - list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) { - int ret; - - asd = v4l2_async_belongs(notifier, sd); - if (!asd) - continue; - - ret = v4l2_async_test_notify(notifier, sd, asd); - if (ret < 0) { - mutex_unlock(&list_lock); - return ret; - } - } + ret = v4l2_async_notifier_try_complete(notifier); + if (ret < 0) + goto err_unbind; /* Keep also completed notifiers on the list */ list_add(¬ifier->list, ¬ifier_list); @@ -197,90 +425,114 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev, mutex_unlock(&list_lock); return 0; + +err_unbind: + /* + * On failure, unbind all sub-devices registered through this notifier. 
+ */ + v4l2_async_notifier_unbind_all_subdevs(notifier); + +err_unlock: + mutex_unlock(&list_lock); + + return ret; +} + +int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev, + struct v4l2_async_notifier *notifier) +{ + int ret; + + if (WARN_ON(!v4l2_dev || notifier->sd)) + return -EINVAL; + + notifier->v4l2_dev = v4l2_dev; + + ret = __v4l2_async_notifier_register(notifier); + if (ret) + notifier->v4l2_dev = NULL; + + return ret; } EXPORT_SYMBOL(v4l2_async_notifier_register); -void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier) +int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd, + struct v4l2_async_notifier *notifier) { - struct v4l2_subdev *sd, *tmp; - unsigned int notif_n_subdev = notifier->num_subdevs; - unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS); - struct device **dev; - int i = 0; + int ret; - if (!notifier->v4l2_dev) - return; + if (WARN_ON(!sd || notifier->v4l2_dev)) + return -EINVAL; - dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL); - if (!dev) { - dev_err(notifier->v4l2_dev->dev, - "Failed to allocate device cache!\n"); - } + notifier->sd = sd; - mutex_lock(&list_lock); + ret = __v4l2_async_notifier_register(notifier); + if (ret) + notifier->sd = NULL; - list_del(¬ifier->list); + return ret; +} +EXPORT_SYMBOL(v4l2_async_subdev_notifier_register); - list_for_each_entry_safe(sd, tmp, ¬ifier->done, async_list) { - struct device *d; +static void __v4l2_async_notifier_unregister( + struct v4l2_async_notifier *notifier) +{ + if (!notifier || (!notifier->v4l2_dev && !notifier->sd)) + return; - d = get_device(sd->dev); + v4l2_async_notifier_unbind_all_subdevs(notifier); - v4l2_async_cleanup(sd); + notifier->sd = NULL; + notifier->v4l2_dev = NULL; - /* If we handled USB devices, we'd have to lock the parent too */ - device_release_driver(d); + list_del(¬ifier->list); +} - if (notifier->unbind) - notifier->unbind(notifier, sd, sd->asd); +void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier) +{ + mutex_lock(&list_lock); - /* - * Store device at the device cache, in order to call - * put_device() on the final step - */ - if (dev) - dev[i++] = d; - else - put_device(d); - } + __v4l2_async_notifier_unregister(notifier); mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(v4l2_async_notifier_unregister); - /* - * Call device_attach() to reprobe devices - * - * NOTE: If dev allocation fails, i is 0, and the whole loop won't be - * executed. - */ - while (i--) { - struct device *d = dev[i]; - - if (d && device_attach(d) < 0) { - const char *name = "(none)"; - int lock = device_trylock(d); - - if (lock && d->driver) - name = d->driver->name; - dev_err(d, "Failed to re-probe to %s\n", name); - if (lock) - device_unlock(d); +void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier) +{ + unsigned int i; + + if (!notifier || !notifier->max_subdevs) + return; + + for (i = 0; i < notifier->num_subdevs; i++) { + struct v4l2_async_subdev *asd = notifier->subdevs[i]; + + switch (asd->match_type) { + case V4L2_ASYNC_MATCH_FWNODE: + fwnode_handle_put(asd->match.fwnode.fwnode); + break; + default: + WARN_ON_ONCE(true); + break; } - put_device(d); + + kfree(asd); } - kvfree(dev); - notifier->v4l2_dev = NULL; + notifier->max_subdevs = 0; + notifier->num_subdevs = 0; - /* - * Don't care about the waiting list, it is initialised and populated - * upon notifier registration. 
- */ + kvfree(notifier->subdevs); + notifier->subdevs = NULL; } -EXPORT_SYMBOL(v4l2_async_notifier_unregister); +EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup); int v4l2_async_register_subdev(struct v4l2_subdev *sd) { + struct v4l2_async_notifier *subdev_notifier; struct v4l2_async_notifier *notifier; + int ret; /* * No reference taken. The reference is held by the device @@ -295,41 +547,74 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd) INIT_LIST_HEAD(&sd->async_list); list_for_each_entry(notifier, ¬ifier_list, list) { - struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd); - if (asd) { - int ret = v4l2_async_test_notify(notifier, sd, asd); - mutex_unlock(&list_lock); - return ret; - } + struct v4l2_device *v4l2_dev = + v4l2_async_notifier_find_v4l2_dev(notifier); + struct v4l2_async_subdev *asd; + + if (!v4l2_dev) + continue; + + asd = v4l2_async_find_match(notifier, sd); + if (!asd) + continue; + + ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd, + asd); + if (ret) + goto err_unbind; + + ret = v4l2_async_notifier_try_complete(notifier); + if (ret) + goto err_unbind; + + goto out_unlock; } /* None matched, wait for hot-plugging */ list_add(&sd->async_list, &subdev_list); +out_unlock: mutex_unlock(&list_lock); return 0; + +err_unbind: + /* + * Complete failed. Unbind the sub-devices bound through registering + * this async sub-device. + */ + subdev_notifier = v4l2_async_find_subdev_notifier(sd); + if (subdev_notifier) + v4l2_async_notifier_unbind_all_subdevs(subdev_notifier); + + if (sd->asd) + v4l2_async_notifier_call_unbind(notifier, sd, sd->asd); + v4l2_async_cleanup(sd); + + mutex_unlock(&list_lock); + + return ret; } EXPORT_SYMBOL(v4l2_async_register_subdev); void v4l2_async_unregister_subdev(struct v4l2_subdev *sd) { - struct v4l2_async_notifier *notifier = sd->notifier; + mutex_lock(&list_lock); - if (!sd->asd) { - if (!list_empty(&sd->async_list)) - v4l2_async_cleanup(sd); - return; - } + __v4l2_async_notifier_unregister(sd->subdev_notifier); + v4l2_async_notifier_cleanup(sd->subdev_notifier); + kfree(sd->subdev_notifier); + sd->subdev_notifier = NULL; - mutex_lock(&list_lock); + if (sd->asd) { + struct v4l2_async_notifier *notifier = sd->notifier; - list_add(&sd->asd->list, ¬ifier->waiting); + list_add(&sd->asd->list, ¬ifier->waiting); - v4l2_async_cleanup(sd); + v4l2_async_notifier_call_unbind(notifier, sd, sd->asd); + } - if (notifier->unbind) - notifier->unbind(notifier, sd, sd->asd); + v4l2_async_cleanup(sd); mutex_unlock(&list_lock); } diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c index 297e10e69898..90628d7a04de 100644 --- a/drivers/media/v4l2-core/v4l2-clk.c +++ b/drivers/media/v4l2-core/v4l2-clk.c @@ -61,8 +61,7 @@ struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id) /* if dev_name is not found, try use the OF name to find again */ if (PTR_ERR(clk) == -ENODEV && dev->of_node) { - v4l2_clk_name_of(clk_name, sizeof(clk_name), - of_node_full_name(dev->of_node)); + v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node); clk = v4l2_clk_find(clk_name); } diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 6f52970f8b54..821f2aa299ae 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -43,6 +43,7 @@ struct v4l2_window32 { compat_caddr_t clips; /* actually struct v4l2_clip32 * */ __u32 clipcount; compat_caddr_t bitmap; + __u8 global_alpha; }; static int 
get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) @@ -51,7 +52,8 @@ static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user copy_from_user(&kp->w, &up->w, sizeof(up->w)) || get_user(kp->field, &up->field) || get_user(kp->chromakey, &up->chromakey) || - get_user(kp->clipcount, &up->clipcount)) + get_user(kp->clipcount, &up->clipcount) || + get_user(kp->global_alpha, &up->global_alpha)) return -EFAULT; if (kp->clipcount > 2048) return -EINVAL; @@ -84,7 +86,8 @@ static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || put_user(kp->field, &up->field) || put_user(kp->chromakey, &up->chromakey) || - put_user(kp->clipcount, &up->clipcount)) + put_user(kp->clipcount, &up->clipcount) || + put_user(kp->global_alpha, &up->global_alpha)) return -EFAULT; return 0; } @@ -627,7 +630,8 @@ struct v4l2_input32 { __u32 tuner; /* Associated tuner */ compat_u64 std; __u32 status; - __u32 reserved[4]; + __u32 capabilities; + __u32 reserved[3]; }; /* The 64-bit v4l2_input struct has extra padding at the end of the struct. @@ -796,7 +800,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || put_user(kp->pending, &up->pending) || put_user(kp->sequence, &up->sequence) || - compat_put_timespec(&kp->timestamp, &up->timestamp) || + put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || + put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || put_user(kp->id, &up->id) || copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) return -EFAULT; diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index dd1db678718c..cbb2ef43945f 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, } EXPORT_SYMBOL(v4l2_ctrl_fill); +static u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev->reserved, 0, sizeof(ev->reserved)); @@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; - ev->u.ctrl.flags = ctrl->flags; + ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else @@ -2003,10 +2013,6 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, handler_set_err(hdl, err); return NULL; } - if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) { - handler_set_err(hdl, -ERANGE); - return NULL; - } if (is_array && (type == V4L2_CTRL_TYPE_BUTTON || type == V4L2_CTRL_TYPE_CTRL_CLASS)) { @@ -2577,10 +2583,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr else qc->id = ctrl->id; strlcpy(qc->name, ctrl->name, sizeof(qc->name)); - qc->flags = ctrl->flags; + qc->flags = user_flags(ctrl); qc->type = ctrl->type; - if (ctrl->is_ptr) - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; @@ -2818,7 +2822,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) { if (which == 0 || which == 
V4L2_CTRL_WHICH_DEF_VAL) - return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0; + return 0; return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; } diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c index 7b8288108e8a..4ceef217de83 100644 --- a/drivers/media/v4l2-core/v4l2-flash-led-class.c +++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -18,7 +18,7 @@ #include <media/v4l2-flash-led-class.h> #define has_flash_op(v4l2_flash, op) \ - (v4l2_flash && v4l2_flash->ops->op) + (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op) #define call_flash_op(v4l2_flash, op, arg) \ (has_flash_op(v4l2_flash, op) ? \ @@ -110,7 +110,7 @@ static void v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, led_set_brightness_sync(&v4l2_flash->fled_cdev->led_cdev, brightness); } else { - led_set_brightness_sync(&v4l2_flash->iled_cdev->led_cdev, + led_set_brightness_sync(v4l2_flash->iled_cdev, brightness); } } @@ -133,7 +133,7 @@ static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash, return 0; led_cdev = &v4l2_flash->fled_cdev->led_cdev; } else { - led_cdev = &v4l2_flash->iled_cdev->led_cdev; + led_cdev = v4l2_flash->iled_cdev; } ret = led_update_brightness(led_cdev); @@ -197,7 +197,7 @@ static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) { struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL; struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; bool external_strobe; int ret = 0; @@ -299,11 +299,26 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, struct v4l2_flash_ctrl_data *ctrl_init_data) { struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; - const struct led_flash_ops *fled_cdev_ops = fled_cdev->ops; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_classdev *led_cdev = fled_cdev ? 
&fled_cdev->led_cdev : NULL; struct v4l2_ctrl_config *ctrl_cfg; u32 mask; + /* Init INDICATOR_INTENSITY ctrl data */ + if (v4l2_flash->iled_cdev) { + ctrl_init_data[INDICATOR_INTENSITY].cid = + V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, + ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg->min = 0; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } + + if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH))) + return; + /* Init FLASH_FAULT ctrl data */ if (flash_cfg->flash_faults) { ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT; @@ -331,27 +346,11 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, /* Init TORCH_INTENSITY ctrl data */ ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY; ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config; - __lfs_to_v4l2_ctrl_config(&flash_cfg->torch_intensity, ctrl_cfg); + __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg); ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY; ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; - /* Init INDICATOR_INTENSITY ctrl data */ - if (v4l2_flash->iled_cdev) { - ctrl_init_data[INDICATOR_INTENSITY].cid = - V4L2_CID_FLASH_INDICATOR_INTENSITY; - ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; - __lfs_to_v4l2_ctrl_config(&flash_cfg->indicator_intensity, - ctrl_cfg); - ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; - ctrl_cfg->min = 0; - ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | - V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; - } - - if (!(led_cdev->flags & LED_DEV_CAP_FLASH)) - return; - /* Init FLASH_STROBE ctrl data */ ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE; ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config; @@ -376,7 +375,7 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, } /* Init STROBE_STATUS ctrl data */ - if (fled_cdev_ops->strobe_get) { + if (has_flash_op(fled_cdev, strobe_get)) { ctrl_init_data[STROBE_STATUS].cid = V4L2_CID_FLASH_STROBE_STATUS; ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config; @@ -386,7 +385,7 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, } /* Init FLASH_TIMEOUT ctrl data */ - if (fled_cdev_ops->timeout_set) { + if (has_flash_op(fled_cdev, timeout_set)) { ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT; ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config; __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg); @@ -394,7 +393,7 @@ static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, } /* Init FLASH_INTENSITY ctrl data */ - if (fled_cdev_ops->flash_brightness_set) { + if (has_flash_op(fled_cdev, flash_brightness_set)) { ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY; ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config; __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg); @@ -486,7 +485,9 @@ static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash) struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; int ret = 0; - v4l2_flash_set_led_brightness(v4l2_flash, ctrls[TORCH_INTENSITY]); + if (ctrls[TORCH_INTENSITY]) + v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); if (ctrls[INDICATOR_INTENSITY]) v4l2_flash_set_led_brightness(v4l2_flash, @@ -528,24 +529,23 @@ static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); struct led_classdev_flash *fled_cdev = 
v4l2_flash->fled_cdev; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; - struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev; - struct led_classdev *led_cdev_ind = NULL; + struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; int ret = 0; if (!v4l2_fh_is_singular(&fh->vfh)) return 0; - mutex_lock(&led_cdev->led_access); - - led_sysfs_disable(led_cdev); - led_trigger_remove(led_cdev); + if (led_cdev) { + mutex_lock(&led_cdev->led_access); - mutex_unlock(&led_cdev->led_access); + led_sysfs_disable(led_cdev); + led_trigger_remove(led_cdev); - if (iled_cdev) { - led_cdev_ind = &iled_cdev->led_cdev; + mutex_unlock(&led_cdev->led_access); + } + if (led_cdev_ind) { mutex_lock(&led_cdev_ind->led_access); led_sysfs_disable(led_cdev_ind); @@ -560,9 +560,11 @@ static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) return 0; out_sync_device: - mutex_lock(&led_cdev->led_access); - led_sysfs_enable(led_cdev); - mutex_unlock(&led_cdev->led_access); + if (led_cdev) { + mutex_lock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); + mutex_unlock(&led_cdev->led_access); + } if (led_cdev_ind) { mutex_lock(&led_cdev_ind->led_access); @@ -577,25 +579,26 @@ static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; - struct led_classdev *led_cdev = &fled_cdev->led_cdev; - struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev; + struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL; + struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev; int ret = 0; if (!v4l2_fh_is_singular(&fh->vfh)) return 0; - mutex_lock(&led_cdev->led_access); + if (led_cdev) { + mutex_lock(&led_cdev->led_access); - if (v4l2_flash->ctrls[STROBE_SOURCE]) - ret = v4l2_ctrl_s_ctrl(v4l2_flash->ctrls[STROBE_SOURCE], + if (v4l2_flash->ctrls[STROBE_SOURCE]) + ret = v4l2_ctrl_s_ctrl( + v4l2_flash->ctrls[STROBE_SOURCE], V4L2_FLASH_STROBE_SOURCE_SOFTWARE); - led_sysfs_enable(led_cdev); - - mutex_unlock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); - if (iled_cdev) { - struct led_classdev *led_cdev_ind = &iled_cdev->led_cdev; + mutex_unlock(&led_cdev->led_access); + } + if (led_cdev_ind) { mutex_lock(&led_cdev_ind->led_access); led_sysfs_enable(led_cdev_ind); mutex_unlock(&led_cdev_ind->led_access); @@ -611,25 +614,19 @@ static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = { static const struct v4l2_subdev_ops v4l2_flash_subdev_ops; -struct v4l2_flash *v4l2_flash_init( +static struct v4l2_flash *__v4l2_flash_init( struct device *dev, struct fwnode_handle *fwn, - struct led_classdev_flash *fled_cdev, - struct led_classdev_flash *iled_cdev, - const struct v4l2_flash_ops *ops, - struct v4l2_flash_config *config) + struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev, + const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config) { struct v4l2_flash *v4l2_flash; - struct led_classdev *led_cdev; struct v4l2_subdev *sd; int ret; - if (!fled_cdev || !ops || !config) + if (!config) return ERR_PTR(-EINVAL); - led_cdev = &fled_cdev->led_cdev; - - v4l2_flash = devm_kzalloc(led_cdev->dev, sizeof(*v4l2_flash), - GFP_KERNEL); + v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL); if (!v4l2_flash) return ERR_PTR(-ENOMEM); @@ -638,7 +635,7 @@ struct v4l2_flash *v4l2_flash_init( v4l2_flash->iled_cdev = 
iled_cdev; v4l2_flash->ops = ops; sd->dev = dev; - sd->fwnode = fwn ? fwn : dev_fwnode(led_cdev->dev); + sd->fwnode = fwn ? fwn : dev_fwnode(dev); v4l2_subdev_init(sd, &v4l2_flash_subdev_ops); sd->internal_ops = &v4l2_flash_subdev_internal_ops; sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; @@ -670,8 +667,26 @@ err_init_controls: return ERR_PTR(ret); } + +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev_flash *fled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config); +} EXPORT_SYMBOL_GPL(v4l2_flash_init); +struct v4l2_flash *v4l2_flash_indicator_init( + struct device *dev, struct fwnode_handle *fwn, + struct led_classdev *iled_cdev, + struct v4l2_flash_config *config) +{ + return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config); +} +EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init); + void v4l2_flash_release(struct v4l2_flash *v4l2_flash) { struct v4l2_subdev *sd; diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c index 153c53ca3925..681b192420d9 100644 --- a/drivers/media/v4l2-core/v4l2-fwnode.c +++ b/drivers/media/v4l2-core/v4l2-fwnode.c @@ -19,6 +19,7 @@ */ #include <linux/acpi.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> #include <linux/property.h> @@ -26,10 +27,20 @@ #include <linux/string.h> #include <linux/types.h> +#include <media/v4l2-async.h> #include <media/v4l2-fwnode.h> - -static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode, - struct v4l2_fwnode_endpoint *vep) +#include <media/v4l2-subdev.h> + +enum v4l2_fwnode_bus_type { + V4L2_FWNODE_BUS_TYPE_GUESS = 0, + V4L2_FWNODE_BUS_TYPE_CSI2_CPHY, + V4L2_FWNODE_BUS_TYPE_CSI1, + V4L2_FWNODE_BUS_TYPE_CCP2, + NR_OF_V4L2_FWNODE_BUS_TYPE, +}; + +static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep) { struct v4l2_fwnode_bus_mipi_csi2 *bus = &vep->bus.mipi_csi2; bool have_clk_lane = false; @@ -40,10 +51,10 @@ static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode, rval = fwnode_property_read_u32_array(fwnode, "data-lanes", NULL, 0); if (rval > 0) { - u32 array[ARRAY_SIZE(bus->data_lanes)]; + u32 array[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES]; bus->num_data_lanes = - min_t(int, ARRAY_SIZE(bus->data_lanes), rval); + min_t(int, V4L2_FWNODE_CSI2_MAX_DATA_LANES, rval); fwnode_property_read_u32_array(fwnode, "data-lanes", array, bus->num_data_lanes); @@ -56,24 +67,25 @@ static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode, bus->data_lanes[i] = array[i]; } - } - - rval = fwnode_property_read_u32_array(fwnode, "lane-polarities", NULL, - 0); - if (rval > 0) { - u32 array[ARRAY_SIZE(bus->lane_polarities)]; - if (rval < 1 + bus->num_data_lanes /* clock + data */) { - pr_warn("too few lane-polarities entries (need %u, got %u)\n", - 1 + bus->num_data_lanes, rval); - return -EINVAL; + rval = fwnode_property_read_u32_array(fwnode, + "lane-polarities", NULL, + 0); + if (rval > 0) { + if (rval != 1 + bus->num_data_lanes /* clock+data */) { + pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n", + 1 + bus->num_data_lanes, rval); + return -EINVAL; + } + + fwnode_property_read_u32_array(fwnode, + "lane-polarities", array, + 1 + bus->num_data_lanes); + + for (i = 0; i < 1 + bus->num_data_lanes; i++) + bus->lane_polarities[i] = array[i]; } - fwnode_property_read_u32_array(fwnode, 
"lane-polarities", array, - 1 + bus->num_data_lanes); - - for (i = 0; i < 1 + bus->num_data_lanes; i++) - bus->lane_polarities[i] = array[i]; } if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) { @@ -146,28 +158,36 @@ static void v4l2_fwnode_endpoint_parse_parallel_bus( } -/** - * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties - * @fwnode: pointer to the endpoint's fwnode handle - * @vep: pointer to the V4L2 fwnode data structure - * - * All properties are optional. If none are found, we don't set any flags. This - * means the port has a static configuration and no properties have to be - * specified explicitly. If any properties that identify the bus as parallel - * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if - * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we - * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a - * reference to @fwnode. - * - * NOTE: This function does not parse properties the size of which is variable - * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in - * new drivers instead. - * - * Return: 0 on success or a negative error code on failure. - */ +static void +v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode, + struct v4l2_fwnode_endpoint *vep, + u32 bus_type) +{ + struct v4l2_fwnode_bus_mipi_csi1 *bus = &vep->bus.mipi_csi1; + u32 v; + + if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) + bus->clock_inv = v; + + if (!fwnode_property_read_u32(fwnode, "strobe", &v)) + bus->strobe = v; + + if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) + bus->data_lane = v; + + if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) + bus->clock_lane = v; + + if (bus_type == V4L2_FWNODE_BUS_TYPE_CCP2) + vep->bus_type = V4L2_MBUS_CCP2; + else + vep->bus_type = V4L2_MBUS_CSI1; +} + int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, struct v4l2_fwnode_endpoint *vep) { + u32 bus_type = 0; int rval; fwnode_graph_parse_endpoint(fwnode, &vep->base); @@ -176,28 +196,33 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode, memset(&vep->bus_type, 0, sizeof(*vep) - offsetof(typeof(*vep), bus_type)); - rval = v4l2_fwnode_endpoint_parse_csi_bus(fwnode, vep); - if (rval) - return rval; - /* - * Parse the parallel video bus properties only if none - * of the MIPI CSI-2 specific properties were found. - */ - if (vep->bus.mipi_csi2.flags == 0) - v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep); - - return 0; + fwnode_property_read_u32(fwnode, "bus-type", &bus_type); + + switch (bus_type) { + case V4L2_FWNODE_BUS_TYPE_GUESS: + rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep); + if (rval) + return rval; + /* + * Parse the parallel video bus properties only if none + * of the MIPI CSI-2 specific properties were found. + */ + if (vep->bus.mipi_csi2.flags == 0) + v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep); + + return 0; + case V4L2_FWNODE_BUS_TYPE_CCP2: + case V4L2_FWNODE_BUS_TYPE_CSI1: + v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, bus_type); + + return 0; + default: + pr_warn("unsupported bus type %u\n", bus_type); + return -EINVAL; + } } EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse); -/* - * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by - * v4l2_fwnode_endpoint_alloc_parse() - * @vep - the V4L2 fwnode the resources of which are to be released - * - * It is safe to call this function with NULL argument or on a V4L2 fwnode the - * parsing of which failed. 
- */ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep) { if (IS_ERR_OR_NULL(vep)) @@ -208,29 +233,6 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep) } EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free); -/** - * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties - * @fwnode: pointer to the endpoint's fwnode handle - * - * All properties are optional. If none are found, we don't set any flags. This - * means the port has a static configuration and no properties have to be - * specified explicitly. If any properties that identify the bus as parallel - * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if - * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we - * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a - * reference to @fwnode. - * - * v4l2_fwnode_endpoint_alloc_parse() has two important differences to - * v4l2_fwnode_endpoint_parse(): - * - * 1. It also parses variable size data. - * - * 2. The memory it has allocated to store the variable size data must be freed - * using v4l2_fwnode_endpoint_free() when no longer needed. - * - * Return: Pointer to v4l2_fwnode_endpoint if successful, on an error pointer - * on error. - */ struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse( struct fwnode_handle *fwnode) { @@ -247,23 +249,23 @@ struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse( rval = fwnode_property_read_u64_array(fwnode, "link-frequencies", NULL, 0); - if (rval < 0) - goto out_err; - - vep->link_frequencies = - kmalloc_array(rval, sizeof(*vep->link_frequencies), GFP_KERNEL); - if (!vep->link_frequencies) { - rval = -ENOMEM; - goto out_err; - } + if (rval > 0) { + vep->link_frequencies = + kmalloc_array(rval, sizeof(*vep->link_frequencies), + GFP_KERNEL); + if (!vep->link_frequencies) { + rval = -ENOMEM; + goto out_err; + } - vep->nr_of_link_frequencies = rval; + vep->nr_of_link_frequencies = rval; - rval = fwnode_property_read_u64_array(fwnode, "link-frequencies", - vep->link_frequencies, - vep->nr_of_link_frequencies); - if (rval < 0) - goto out_err; + rval = fwnode_property_read_u64_array( + fwnode, "link-frequencies", vep->link_frequencies, + vep->nr_of_link_frequencies); + if (rval < 0) + goto out_err; + } return vep; @@ -273,24 +275,6 @@ out_err: } EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse); -/** - * v4l2_fwnode_endpoint_parse_link() - parse a link between two endpoints - * @__fwnode: pointer to the endpoint's fwnode at the local end of the link - * @link: pointer to the V4L2 fwnode link data structure - * - * Fill the link structure with the local and remote nodes and port numbers. - * The local_node and remote_node fields are set to point to the local and - * remote port's parent nodes respectively (the port parent node being the - * parent node of the port node if that node isn't a 'ports' node, or the - * grand-parent node of the port node otherwise). - * - * A reference is taken to both the local and remote nodes, the caller must use - * v4l2_fwnode_endpoint_put_link() to drop the references when done with the - * link. - * - * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be - * found. 
- */ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode, struct v4l2_fwnode_link *link) { @@ -325,13 +309,6 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode, } EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link); -/** - * v4l2_fwnode_put_link() - drop references to nodes in a link - * @link: pointer to the V4L2 fwnode link data structure - * - * Drop references to the local and remote nodes in the link. This function - * must be called on every link parsed with v4l2_fwnode_parse_link(). - */ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link) { fwnode_handle_put(link->local_node); @@ -339,6 +316,630 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link) } EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link); +static int v4l2_async_notifier_realloc(struct v4l2_async_notifier *notifier, + unsigned int max_subdevs) +{ + struct v4l2_async_subdev **subdevs; + + if (max_subdevs <= notifier->max_subdevs) + return 0; + + subdevs = kvmalloc_array( + max_subdevs, sizeof(*notifier->subdevs), + GFP_KERNEL | __GFP_ZERO); + if (!subdevs) + return -ENOMEM; + + if (notifier->subdevs) { + memcpy(subdevs, notifier->subdevs, + sizeof(*subdevs) * notifier->num_subdevs); + + kvfree(notifier->subdevs); + } + + notifier->subdevs = subdevs; + notifier->max_subdevs = max_subdevs; + + return 0; +} + +static int v4l2_async_notifier_fwnode_parse_endpoint( + struct device *dev, struct v4l2_async_notifier *notifier, + struct fwnode_handle *endpoint, unsigned int asd_struct_size, + int (*parse_endpoint)(struct device *dev, + struct v4l2_fwnode_endpoint *vep, + struct v4l2_async_subdev *asd)) +{ + struct v4l2_async_subdev *asd; + struct v4l2_fwnode_endpoint *vep; + int ret = 0; + + asd = kzalloc(asd_struct_size, GFP_KERNEL); + if (!asd) + return -ENOMEM; + + asd->match_type = V4L2_ASYNC_MATCH_FWNODE; + asd->match.fwnode.fwnode = + fwnode_graph_get_remote_port_parent(endpoint); + if (!asd->match.fwnode.fwnode) { + dev_warn(dev, "bad remote port parent\n"); + ret = -EINVAL; + goto out_err; + } + + vep = v4l2_fwnode_endpoint_alloc_parse(endpoint); + if (IS_ERR(vep)) { + ret = PTR_ERR(vep); + dev_warn(dev, "unable to parse V4L2 fwnode endpoint (%d)\n", + ret); + goto out_err; + } + + ret = parse_endpoint ? parse_endpoint(dev, vep, asd) : 0; + if (ret == -ENOTCONN) + dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep->base.port, + vep->base.id); + else if (ret < 0) + dev_warn(dev, + "driver could not parse port@%u/endpoint@%u (%d)\n", + vep->base.port, vep->base.id, ret); + v4l2_fwnode_endpoint_free(vep); + if (ret < 0) + goto out_err; + + notifier->subdevs[notifier->num_subdevs] = asd; + notifier->num_subdevs++; + + return 0; + +out_err: + fwnode_handle_put(asd->match.fwnode.fwnode); + kfree(asd); + + return ret == -ENOTCONN ? 
0 : ret; +} + +static int __v4l2_async_notifier_parse_fwnode_endpoints( + struct device *dev, struct v4l2_async_notifier *notifier, + size_t asd_struct_size, unsigned int port, bool has_port, + int (*parse_endpoint)(struct device *dev, + struct v4l2_fwnode_endpoint *vep, + struct v4l2_async_subdev *asd)) +{ + struct fwnode_handle *fwnode; + unsigned int max_subdevs = notifier->max_subdevs; + int ret; + + if (WARN_ON(asd_struct_size < sizeof(struct v4l2_async_subdev))) + return -EINVAL; + + for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint( + dev_fwnode(dev), fwnode)); ) { + struct fwnode_handle *dev_fwnode; + bool is_available; + + dev_fwnode = fwnode_graph_get_port_parent(fwnode); + is_available = fwnode_device_is_available(dev_fwnode); + fwnode_handle_put(dev_fwnode); + if (!is_available) + continue; + + if (has_port) { + struct fwnode_endpoint ep; + + ret = fwnode_graph_parse_endpoint(fwnode, &ep); + if (ret) { + fwnode_handle_put(fwnode); + return ret; + } + + if (ep.port != port) + continue; + } + max_subdevs++; + } + + /* No subdevs to add? Return here. */ + if (max_subdevs == notifier->max_subdevs) + return 0; + + ret = v4l2_async_notifier_realloc(notifier, max_subdevs); + if (ret) + return ret; + + for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint( + dev_fwnode(dev), fwnode)); ) { + struct fwnode_handle *dev_fwnode; + bool is_available; + + dev_fwnode = fwnode_graph_get_port_parent(fwnode); + is_available = fwnode_device_is_available(dev_fwnode); + fwnode_handle_put(dev_fwnode); + if (!is_available) + continue; + + if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) { + ret = -EINVAL; + break; + } + + if (has_port) { + struct fwnode_endpoint ep; + + ret = fwnode_graph_parse_endpoint(fwnode, &ep); + if (ret) + break; + + if (ep.port != port) + continue; + } + + ret = v4l2_async_notifier_fwnode_parse_endpoint( + dev, notifier, fwnode, asd_struct_size, parse_endpoint); + if (ret < 0) + break; + } + + fwnode_handle_put(fwnode); + + return ret; +} + +int v4l2_async_notifier_parse_fwnode_endpoints( + struct device *dev, struct v4l2_async_notifier *notifier, + size_t asd_struct_size, + int (*parse_endpoint)(struct device *dev, + struct v4l2_fwnode_endpoint *vep, + struct v4l2_async_subdev *asd)) +{ + return __v4l2_async_notifier_parse_fwnode_endpoints( + dev, notifier, asd_struct_size, 0, false, parse_endpoint); +} +EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints); + +int v4l2_async_notifier_parse_fwnode_endpoints_by_port( + struct device *dev, struct v4l2_async_notifier *notifier, + size_t asd_struct_size, unsigned int port, + int (*parse_endpoint)(struct device *dev, + struct v4l2_fwnode_endpoint *vep, + struct v4l2_async_subdev *asd)) +{ + return __v4l2_async_notifier_parse_fwnode_endpoints( + dev, notifier, asd_struct_size, port, true, parse_endpoint); +} +EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints_by_port); + +/* + * v4l2_fwnode_reference_parse - parse references for async sub-devices + * @dev: the device node the properties of which are parsed for references + * @notifier: the async notifier where the async subdevs will be added + * @prop: the name of the property + * + * Return: 0 on success + * -ENOENT if no entries were found + * -ENOMEM if memory allocation failed + * -EINVAL if property parsing failed + */ +static int v4l2_fwnode_reference_parse( + struct device *dev, struct v4l2_async_notifier *notifier, + const char *prop) +{ + struct fwnode_reference_args args; + unsigned int index; + int ret; + + for (index = 0; 
+ !(ret = fwnode_property_get_reference_args( + dev_fwnode(dev), prop, NULL, 0, index, &args)); + index++) + fwnode_handle_put(args.fwnode); + + if (!index) + return -ENOENT; + + /* + * Note that right now both -ENODATA and -ENOENT may signal + * out-of-bounds access. Return the error in cases other than that. + */ + if (ret != -ENOENT && ret != -ENODATA) + return ret; + + ret = v4l2_async_notifier_realloc(notifier, + notifier->num_subdevs + index); + if (ret) + return ret; + + for (index = 0; !fwnode_property_get_reference_args( + dev_fwnode(dev), prop, NULL, 0, index, &args); + index++) { + struct v4l2_async_subdev *asd; + + if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) { + ret = -EINVAL; + goto error; + } + + asd = kzalloc(sizeof(*asd), GFP_KERNEL); + if (!asd) { + ret = -ENOMEM; + goto error; + } + + notifier->subdevs[notifier->num_subdevs] = asd; + asd->match.fwnode.fwnode = args.fwnode; + asd->match_type = V4L2_ASYNC_MATCH_FWNODE; + notifier->num_subdevs++; + } + + return 0; + +error: + fwnode_handle_put(args.fwnode); + return ret; +} + +/* + * v4l2_fwnode_reference_get_int_prop - parse a reference with integer + * arguments + * @fwnode: fwnode to read @prop from + * @notifier: notifier for @dev + * @prop: the name of the property + * @index: the index of the reference to get + * @props: the array of integer property names + * @nprops: the number of integer property names in @nprops + * + * First find an fwnode referred to by the reference at @index in @prop. + * + * Then under that fwnode, @nprops times, for each property in @props, + * iteratively follow child nodes starting from fwnode such that they have the + * property in @props array at the index of the child node distance from the + * root node and the value of that property matching with the integer argument + * of the reference, at the same index. + * + * The child fwnode reched at the end of the iteration is then returned to the + * caller. + * + * The core reason for this is that you cannot refer to just any node in ACPI. + * So to refer to an endpoint (easy in DT) you need to refer to a device, then + * provide a list of (property name, property value) tuples where each tuple + * uniquely identifies a child node. The first tuple identifies a child directly + * underneath the device fwnode, the next tuple identifies a child node + * underneath the fwnode identified by the previous tuple, etc. until you + * reached the fwnode you need. 
+ * + * An example with a graph, as defined in Documentation/acpi/dsd/graph.txt: + * + * Scope (\_SB.PCI0.I2C2) + * { + * Device (CAM0) + * { + * Name (_DSD, Package () { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { + * "compatible", + * Package () { "nokia,smia" } + * }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port0", "PRT0" }, + * } + * }) + * Name (PRT0, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 0 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP00" }, + * } + * }) + * Name (EP00, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package() { + * \_SB.PCI0.ISP, 4, 0 + * } + * }, + * } + * }) + * } + * } + * + * Scope (\_SB.PCI0) + * { + * Device (ISP) + * { + * Name (_DSD, Package () { + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "port4", "PRT4" }, + * } + * }) + * + * Name (PRT4, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "port", 4 }, + * }, + * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), + * Package () { + * Package () { "endpoint0", "EP40" }, + * } + * }) + * + * Name (EP40, Package() { + * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), + * Package () { + * Package () { "endpoint", 0 }, + * Package () { + * "remote-endpoint", + * Package () { + * \_SB.PCI0.I2C2.CAM0, + * 0, 0 + * } + * }, + * } + * }) + * } + * } + * + * From the EP40 node under ISP device, you could parse the graph remote + * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments: + * + * @fwnode: fwnode referring to EP40 under ISP. + * @prop: "remote-endpoint" + * @index: 0 + * @props: "port", "endpoint" + * @nprops: 2 + * + * And you'd get back fwnode referring to EP00 under CAM0. + * + * The same works the other way around: if you use EP00 under CAM0 as the + * fwnode, you'll get fwnode referring to EP40 under ISP. + * + * The same example in DT syntax would look like this: + * + * cam: cam0 { + * compatible = "nokia,smia"; + * + * port { + * port = <0>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&isp 4 0>; + * }; + * }; + * }; + * + * isp: isp { + * ports { + * port@4 { + * port = <4>; + * endpoint { + * endpoint = <0>; + * remote-endpoint = <&cam 0 0>; + * }; + * }; + * }; + * }; + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwise failed + * -ENOMEM if memory allocation failed + */ +static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop( + struct fwnode_handle *fwnode, const char *prop, unsigned int index, + const char * const *props, unsigned int nprops) +{ + struct fwnode_reference_args fwnode_args; + unsigned int *args = fwnode_args.args; + struct fwnode_handle *child; + int ret; + + /* + * Obtain remote fwnode as well as the integer arguments. + * + * Note that right now both -ENODATA and -ENOENT may signal + * out-of-bounds access. Return -ENOENT in that case. + */ + ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops, + index, &fwnode_args); + if (ret) + return ERR_PTR(ret == -ENODATA ? -ENOENT : ret); + + /* + * Find a node in the tree under the referred fwnode corresponding to + * the integer arguments. 
+ */ + fwnode = fwnode_args.fwnode; + while (nprops--) { + u32 val; + + /* Loop over all child nodes under fwnode. */ + fwnode_for_each_child_node(fwnode, child) { + if (fwnode_property_read_u32(child, *props, &val)) + continue; + + /* Found property, see if its value matches. */ + if (val == *args) + break; + } + + fwnode_handle_put(fwnode); + + /* No property found; return an error here. */ + if (!child) { + fwnode = ERR_PTR(-ENOENT); + break; + } + + props++; + args++; + fwnode = child; + } + + return fwnode; +} + +/* + * v4l2_fwnode_reference_parse_int_props - parse references for async + * sub-devices + * @dev: struct device pointer + * @notifier: notifier for @dev + * @prop: the name of the property + * @props: the array of integer property names + * @nprops: the number of integer properties + * + * Use v4l2_fwnode_reference_get_int_prop to find fwnodes through reference in + * property @prop with integer arguments with child nodes matching in properties + * @props. Then, set up V4L2 async sub-devices for those fwnodes in the notifier + * accordingly. + * + * While it is technically possible to use this function on DT, it is only + * meaningful on ACPI. On Device tree you can refer to any node in the tree but + * on ACPI the references are limited to devices. + * + * Return: 0 on success + * -ENOENT if no entries (or the property itself) were found + * -EINVAL if property parsing otherwisefailed + * -ENOMEM if memory allocation failed + */ +static int v4l2_fwnode_reference_parse_int_props( + struct device *dev, struct v4l2_async_notifier *notifier, + const char *prop, const char * const *props, unsigned int nprops) +{ + struct fwnode_handle *fwnode; + unsigned int index; + int ret; + + for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop( + dev_fwnode(dev), prop, index, props, + nprops))); index++) + fwnode_handle_put(fwnode); + + /* + * Note that right now both -ENODATA and -ENOENT may signal + * out-of-bounds access. Return the error in cases other than that. + */ + if (PTR_ERR(fwnode) != -ENOENT && PTR_ERR(fwnode) != -ENODATA) + return PTR_ERR(fwnode); + + ret = v4l2_async_notifier_realloc(notifier, + notifier->num_subdevs + index); + if (ret) + return -ENOMEM; + + for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop( + dev_fwnode(dev), prop, index, props, + nprops))); index++) { + struct v4l2_async_subdev *asd; + + if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) { + ret = -EINVAL; + goto error; + } + + asd = kzalloc(sizeof(struct v4l2_async_subdev), GFP_KERNEL); + if (!asd) { + ret = -ENOMEM; + goto error; + } + + notifier->subdevs[notifier->num_subdevs] = asd; + asd->match.fwnode.fwnode = fwnode; + asd->match_type = V4L2_ASYNC_MATCH_FWNODE; + notifier->num_subdevs++; + } + + return PTR_ERR(fwnode) == -ENOENT ? 
0 : PTR_ERR(fwnode); + +error: + fwnode_handle_put(fwnode); + return ret; +} + +int v4l2_async_notifier_parse_fwnode_sensor_common( + struct device *dev, struct v4l2_async_notifier *notifier) +{ + static const char * const led_props[] = { "led" }; + static const struct { + const char *name; + const char * const *props; + unsigned int nprops; + } props[] = { + { "flash-leds", led_props, ARRAY_SIZE(led_props) }, + { "lens-focus", NULL, 0 }, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + if (props[i].props && is_acpi_node(dev_fwnode(dev))) + ret = v4l2_fwnode_reference_parse_int_props( + dev, notifier, props[i].name, + props[i].props, props[i].nprops); + else + ret = v4l2_fwnode_reference_parse( + dev, notifier, props[i].name); + if (ret && ret != -ENOENT) { + dev_warn(dev, "parsing property \"%s\" failed (%d)\n", + props[i].name, ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_sensor_common); + +int v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd) +{ + struct v4l2_async_notifier *notifier; + int ret; + + if (WARN_ON(!sd->dev)) + return -ENODEV; + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) + return -ENOMEM; + + ret = v4l2_async_notifier_parse_fwnode_sensor_common(sd->dev, + notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_subdev_notifier_register(sd, notifier); + if (ret < 0) + goto out_cleanup; + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto out_unregister; + + sd->subdev_notifier = notifier; + + return 0; + +out_unregister: + v4l2_async_notifier_unregister(notifier); + +out_cleanup: + v4l2_async_notifier_cleanup(notifier); + kfree(notifier); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor_common); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index cab63bb49c97..79614992ee21 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -730,9 +730,12 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only) break; case V4L2_FRMSIZE_TYPE_STEPWISE: pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n", - p->stepwise.min_width, p->stepwise.min_height, - p->stepwise.step_width, p->stepwise.step_height, - p->stepwise.max_width, p->stepwise.max_height); + p->stepwise.min_width, + p->stepwise.min_height, + p->stepwise.max_width, + p->stepwise.max_height, + p->stepwise.step_width, + p->stepwise.step_height); break; case V4L2_FRMSIZE_TYPE_CONTINUOUS: /* fall through */ @@ -1195,10 +1198,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; - case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; - case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; - case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; - case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; @@ -1211,6 +1210,14 @@ static void 
v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; + case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break; case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break; diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c index 7416010542c1..95f3b02e1f84 100644 --- a/drivers/media/v4l2-core/v4l2-trace.c +++ b/drivers/media/v4l2-core/v4l2-trace.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 #include <media/v4l2-common.h> #include <media/v4l2-fh.h> #include <media/videobuf2-v4l2.h> diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/v4l2-core/vb2-trace.c index 61e74f5936b3..4c0f39d271f0 100644 --- a/drivers/media/v4l2-core/vb2-trace.c +++ b/drivers/media/v4l2-core/vb2-trace.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 #include <media/videobuf2-core.h> #define CREATE_TRACE_POINTS diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 14f83cecfa92..cb115ba6a1d2 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -194,8 +194,6 @@ static void __enqueue_in_driver(struct vb2_buffer *vb); static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; - enum dma_data_direction dma_dir = - q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; void *mem_priv; int plane; int ret = -ENOMEM; @@ -209,7 +207,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) mem_priv = call_ptr_memop(vb, alloc, q->alloc_devs[plane] ? : q->dev, - q->dma_attrs, size, dma_dir, q->gfp_flags); + q->dma_attrs, size, q->dma_dir, q->gfp_flags); if (IS_ERR_OR_NULL(mem_priv)) { if (mem_priv) ret = PTR_ERR(mem_priv); @@ -978,8 +976,6 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) void *mem_priv; unsigned int plane; int ret = 0; - enum dma_data_direction dma_dir = - q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); @@ -1030,7 +1026,7 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_devs[plane] ? : q->dev, planes[plane].m.userptr, - planes[plane].length, dma_dir); + planes[plane].length, q->dma_dir); if (IS_ERR(mem_priv)) { dprintk(1, "failed acquiring userspace memory for plane %d\n", plane); @@ -1096,8 +1092,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) void *mem_priv; unsigned int plane; int ret = 0; - enum dma_data_direction dma_dir = - q->is_output ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); @@ -1139,7 +1133,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) continue; } - dprintk(1, "buffer for plane %d changed\n", plane); + dprintk(3, "buffer for plane %d changed\n", plane); if (!reacquired) { reacquired = true; @@ -1156,7 +1150,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) /* Acquire each plane's memory */ mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_devs[plane] ? : q->dev, - dbuf, planes[plane].length, dma_dir); + dbuf, planes[plane].length, q->dma_dir); if (IS_ERR(mem_priv)) { dprintk(1, "failed to attach dmabuf\n"); ret = PTR_ERR(mem_priv); @@ -1298,7 +1292,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) /* Fill buffer information for the userspace */ call_void_bufop(q, fill_user_buffer, vb, pb); - dprintk(1, "prepare of buffer %d succeeded\n", vb->index); + dprintk(2, "prepare of buffer %d succeeded\n", vb->index); return ret; } @@ -1428,7 +1422,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) return ret; } - dprintk(1, "qbuf of buffer %d succeeded\n", vb->index); + dprintk(2, "qbuf of buffer %d succeeded\n", vb->index); return 0; } EXPORT_SYMBOL_GPL(vb2_core_qbuf); @@ -1476,7 +1470,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) } if (nonblocking) { - dprintk(1, "nonblocking and no buffers to dequeue, will not wait\n"); + dprintk(3, "nonblocking and no buffers to dequeue, will not wait\n"); return -EAGAIN; } @@ -1623,7 +1617,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, /* go back to dequeued state */ __vb2_dqbuf(vb); - dprintk(1, "dqbuf of buffer %d, with state %d\n", + dprintk(2, "dqbuf of buffer %d, with state %d\n", vb->index, vb->state); return 0; @@ -2003,6 +1997,11 @@ int vb2_core_queue_init(struct vb2_queue *q) if (q->buf_struct_size == 0) q->buf_struct_size = sizeof(struct vb2_buffer); + if (q->bidirectional) + q->dma_dir = DMA_BIDIRECTIONAL; + else + q->dma_dir = q->is_output ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + return 0; } EXPORT_SYMBOL_GPL(vb2_core_queue_init); diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 4f246d166111..a9806ba6116d 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -352,7 +352,7 @@ static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf, return vb2_dc_mmap(dbuf->priv, vma); } -static struct dma_buf_ops vb2_dc_dmabuf_ops = { +static const struct dma_buf_ops vb2_dc_dmabuf_ops = { .attach = vb2_dc_dmabuf_ops_attach, .detach = vb2_dc_dmabuf_ops_detach, .map_dma_buf = vb2_dc_dmabuf_ops_map, @@ -479,7 +479,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, { struct vb2_dc_buf *buf; struct frame_vector *vec; - unsigned long offset; + unsigned int offset; int n_pages, i; int ret = 0; struct sg_table *sgt; @@ -507,8 +507,9 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, buf->dev = dev; buf->dma_dir = dma_dir; - offset = vaddr & ~PAGE_MASK; - vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); + offset = lower_32_bits(offset_in_page(vaddr)); + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || + dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) { ret = PTR_ERR(vec); goto fail_buf; diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 5defa1f22ca2..6808231a6bdc 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -239,7 +239,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, buf->offset = vaddr & ~PAGE_MASK; buf->size = size; buf->dma_sgt = &buf->sg_table; - vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE); + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || + dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) goto userptr_fail_pfnvec; buf->vec = vec; @@ -292,7 +293,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) vm_unmap_ram(buf->vaddr, buf->num_pages); sg_free_table(buf->dma_sgt); while (--i >= 0) { - if (buf->dma_dir == DMA_FROM_DEVICE) + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) set_page_dirty_lock(buf->pages[i]); } vb2_destroy_framevec(buf->vec); @@ -500,7 +502,7 @@ static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf, return vb2_dma_sg_mmap(dbuf->priv, vma); } -static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { +static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { .attach = vb2_dma_sg_dmabuf_ops_attach, .detach = vb2_dma_sg_dmabuf_ops_detach, .map_dma_buf = vb2_dma_sg_dmabuf_ops_map, diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index b337d780844c..3a7c80cd1a17 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -87,7 +87,8 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, buf->dma_dir = dma_dir; offset = vaddr & ~PAGE_MASK; buf->size = size; - vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE || + dma_dir == DMA_BIDIRECTIONAL); if (IS_ERR(vec)) { ret = PTR_ERR(vec); goto fail_pfnvec_create; @@ -137,7 +138,8 @@ static void vb2_vmalloc_put_userptr(void *buf_priv) pages = frame_vector_pages(buf->vec); if (vaddr) vm_unmap_ram((void *)vaddr, n_pages); - if (buf->dma_dir == 
DMA_FROM_DEVICE) + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) for (i = 0; i < n_pages; i++) set_page_dirty_lock(pages[i]); } else { @@ -338,7 +340,7 @@ static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, return vb2_vmalloc_mmap(dbuf->priv, vma); } -static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { +static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { .attach = vb2_vmalloc_dmabuf_ops_attach, .detach = vb2_vmalloc_dmabuf_ops_detach, .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, |
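
A sensor driver built on top of this series typically only needs to call v4l2_async_register_subdev_sensor_common() from its probe function; the helper parses the "flash-leds" and "lens-focus" firmware references and registers both the sub-device notifier and the sub-device itself. A minimal sketch, assuming an I2C sensor with hypothetical my_sensor names and omitting the rest of the driver:

#include <linux/i2c.h>
#include <linux/property.h>
#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

static const struct v4l2_subdev_ops my_sensor_subdev_ops;	/* hypothetical */

static int my_sensor_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct v4l2_subdev *sd;

	sd = devm_kzalloc(&client->dev, sizeof(*sd), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;

	v4l2_subdev_init(sd, &my_sensor_subdev_ops);
	sd->dev = &client->dev;			/* the helper WARNs if this is unset */
	sd->fwnode = dev_fwnode(&client->dev);	/* used for fwnode matching */

	/*
	 * Parses the firmware references (flash, lens), registers the
	 * sub-device notifier and then the sub-device itself, undoing
	 * everything on failure.
	 */
	return v4l2_async_register_subdev_sensor_common(sd);
}

The helper stores the notifier in sd->subdev_notifier, so the intent is that the usual v4l2_async_unregister_subdev() call in the remove path also disposes of it.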
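
The videobuf2 changes derive the DMA direction once in vb2_core_queue_init(): queues that set the new bidirectional flag are mapped DMA_BIDIRECTIONAL, all others keep DMA_TO_DEVICE or DMA_FROM_DEVICE depending on is_output, and bidirectional userptr/dmabuf buffers are now treated as writable when creating frame vectors and dirtying pages. A minimal sketch of a queue setup that opts in, assuming a dma-contig based driver with hypothetical my_codec names:

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

extern const struct vb2_ops my_codec_vb2_ops;	/* hypothetical driver ops */

static int my_codec_init_queue(struct vb2_queue *q, struct device *dev,
			       void *drv_priv, struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = drv_priv;
	q->ops = &my_codec_vb2_ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->lock = lock;
	q->dev = dev;
	/*
	 * The hardware reads and modifies these buffers in place, so ask
	 * vb2_core_queue_init() for DMA_BIDIRECTIONAL mappings instead of
	 * plain DMA_TO_DEVICE.
	 */
	q->bidirectional = 1;

	return vb2_queue_init(q);
}

Queues that leave bidirectional at 0 keep the previous behaviour, so existing drivers are unaffected.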