Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/dma-buf/udmabuf.c                | 28 |
-rw-r--r-- | drivers/gpu/drm/drm_fb_helper.c          | 30 |
-rw-r--r-- | drivers/gpu/drm/drm_fourcc.c             |  4 |
-rw-r--r-- | drivers/gpu/drm/drm_gem.c                | 19 |
-rw-r--r-- | drivers/gpu/drm/drm_gem_shmem_helper.c   |  4 |
-rw-r--r-- | drivers/gpu/drm/drm_mode_config.c        |  8 |
-rw-r--r-- | drivers/gpu/drm/scheduler/sched_entity.c |  2 |
-rw-r--r-- | drivers/gpu/drm/scheduler/sched_main.c   |  4 |
-rw-r--r-- | drivers/video/fbdev/core/fb_defio.c      | 16 |
9 files changed, 74 insertions, 41 deletions
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 283816fbd72f..740d6e426ee9 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
 
 static int list_limit = 1024;
 module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 	return 0;
 }
 
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+	struct udmabuf *ubuf = buf->priv;
+	void *vaddr;
+
+	dma_resv_assert_held(buf->resv);
+
+	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+	if (!vaddr)
+		return -EINVAL;
+
+	iosys_map_set_vaddr(map, vaddr);
+	return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+	struct udmabuf *ubuf = buf->priv;
+
+	dma_resv_assert_held(buf->resv);
+
+	vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
 static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
 				     enum dma_data_direction direction)
 {
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
 	.unmap_dma_buf	= unmap_udmabuf,
 	.release	= release_udmabuf,
 	.mmap		= mmap_udmabuf,
+	.vmap		= vmap_udmabuf,
+	.vunmap		= vunmap_udmabuf,
 	.begin_cpu_access = begin_cpu_udmabuf,
 	.end_cpu_access	= end_cpu_udmabuf,
 };
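The new vmap/vunmap callbacks hook udmabuf into the generic dma-buf kernel-mapping interface. A rough sketch of how an in-kernel importer would reach them, assuming the post-conversion dma-buf locking API where the unlocked wrappers take the reservation lock that the dma_resv_assert_held() checks above expect; the helper name and fill operation are illustrative, not part of the patch:

    #include <linux/dma-buf.h>
    #include <linux/iosys-map.h>

    /* Hypothetical importer helper: fill a udmabuf with a byte value. */
    static int example_fill_udmabuf(struct dma_buf *buf, int value, size_t len)
    {
    	struct iosys_map map;
    	int ret;

    	/* Takes buf->resv, then ends up in udmabuf_ops.vmap (vmap_udmabuf). */
    	ret = dma_buf_vmap_unlocked(buf, &map);
    	if (ret)
    		return ret;

    	iosys_map_memset(&map, 0, value, len);

    	/* Unmaps via udmabuf_ops.vunmap (vunmap_udmabuf). */
    	dma_buf_vunmap_unlocked(buf, &map);
    	return 0;
    }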
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a1f86e436ae8..b3a731b9170a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,13 @@ err:
 	spin_unlock_irqrestore(&helper->damage_lock, flags);
 }
 
+static void drm_fb_helper_damage_work(struct work_struct *work)
+{
+	struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
+
+	drm_fb_helper_fb_dirty(helper);
+}
+
 /**
  * drm_fb_helper_prepare - setup a drm_fb_helper structure
  * @dev: DRM device
@@ -418,6 +425,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 	INIT_LIST_HEAD(&helper->kernel_fb_list);
 	spin_lock_init(&helper->damage_lock);
 	INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
+	INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
 	helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
 	mutex_init(&helper->lock);
 	helper->funcs = funcs;
@@ -549,6 +557,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 		return;
 
 	cancel_work_sync(&fb_helper->resume_work);
+	cancel_work_sync(&fb_helper->damage_work);
 
 	info = fb_helper->info;
 	if (info) {
@@ -590,16 +599,9 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
 static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
 				 u32 width, u32 height)
 {
-	struct fb_info *info = helper->info;
-
 	drm_fb_helper_add_damage_clip(helper, x, y, width, height);
 
-	/*
-	 * The current fbdev emulation only flushes buffers if a damage
-	 * update is necessary. And we can assume that deferred I/O has
-	 * been enabled as damage updates require deferred I/O for mmap.
-	 */
-	fb_deferred_io_schedule_flush(info);
+	schedule_work(&helper->damage_work);
 }
 
 /*
@@ -664,16 +666,10 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
 
 	if (min_off < max_off) {
 		drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off,
 						   &damage_area);
-		drm_fb_helper_add_damage_clip(helper, damage_area.x1, damage_area.y1,
-					      drm_rect_width(&damage_area),
-					      drm_rect_height(&damage_area));
+		drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
+				     drm_rect_width(&damage_area),
+				     drm_rect_height(&damage_area));
 	}
-
-	/*
-	 * Flushes all dirty pages from mmap's pageref list and the
-	 * areas that have been written by struct fb_ops callbacks.
-	 */
-	drm_fb_helper_fb_dirty(helper);
 }
 EXPORT_SYMBOL(drm_fb_helper_deferred_io);
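With this change, damage reported against the emulated fbdev buffer schedules a dedicated worker instead of riding on the deferred-I/O delayed work (the fb_defio.c helper removed at the end of this series). The lifecycle is the plain workqueue pattern; a minimal standalone sketch, with illustrative names that are not from the patch:

    #include <linux/workqueue.h>

    struct my_helper {
    	struct work_struct damage_work;
    	/* damage clip, damage_lock, ... */
    };

    /* Worker body: runs in process context, so it may take mutexes and sleep. */
    static void my_damage_work(struct work_struct *work)
    {
    	struct my_helper *helper =
    		container_of(work, struct my_helper, damage_work);

    	/* flush helper's pending damage here */
    }

    /*
     * init:     INIT_WORK(&helper->damage_work, my_damage_work);
     * producer: schedule_work(&helper->damage_work);    (safe from atomic context)
     * teardown: cancel_work_sync(&helper->damage_work); (before freeing helper)
     */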
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6242dfbe9240..0f17dfa8702b 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
 		{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
 		{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+		{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+		{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
 		{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
 		{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..59a0bb5ebd85 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -170,6 +170,20 @@ void drm_gem_private_object_init(struct drm_device *dev,
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
 /**
+ * drm_gem_private_object_fini - Finalize a failed drm_gem_object
+ * @obj: drm_gem_object
+ *
+ * Uninitialize an already allocated GEM object when it initialized failed
+ */
+void drm_gem_private_object_fini(struct drm_gem_object *obj)
+{
+	WARN_ON(obj->dma_buf);
+
+	dma_resv_fini(&obj->_resv);
+}
+EXPORT_SYMBOL(drm_gem_private_object_fini);
+
+/**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
  *
@@ -930,12 +944,11 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	WARN_ON(obj->dma_buf);
-
 	if (obj->filp)
 		fput(obj->filp);
 
-	dma_resv_fini(&obj->_resv);
+	drm_gem_private_object_fini(obj);
+
 	drm_gem_free_mmap_offset(obj);
 	drm_gem_lru_remove(obj);
 }
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 35138f8a375c..db73234edcbe 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -79,8 +79,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	} else {
 		ret = drm_gem_object_init(dev, obj, size);
 	}
-	if (ret)
+	if (ret) {
+		drm_gem_private_object_fini(obj);
 		goto err_free;
+	}
 
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 688c8afe0bf1..8525ef851540 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -399,6 +399,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
  */
 int drmm_mode_config_init(struct drm_device *dev)
 {
+	int ret;
+
 	mutex_init(&dev->mode_config.mutex);
 	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
 	mutex_init(&dev->mode_config.idr_mutex);
@@ -420,7 +422,11 @@ int drmm_mode_config_init(struct drm_device *dev)
 	init_llist_head(&dev->mode_config.connector_free_list);
 	INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
 
-	drm_mode_create_standard_properties(dev);
+	ret = drm_mode_create_standard_properties(dev);
+	if (ret) {
+		drm_mode_config_cleanup(dev);
+		return ret;
+	}
 
 	/* Just to be sure */
 	dev->mode_config.num_fb = 0;
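drm_gem_private_object_fini() lets callers unwind drm_gem_private_object_init() or drm_gem_object_init() on an error path without going through the full drm_gem_object_release(), which is exactly how the shmem helper above uses it. A sketch of the driver-side pattern, where my_bo_create() and my_hw_bind() are hypothetical names, not from the patch:

    static int my_bo_create(struct drm_device *dev, size_t size,
    			    struct drm_gem_object *obj)
    {
    	int ret;

    	drm_gem_private_object_init(dev, obj, size);

    	ret = my_hw_bind(obj);	/* hypothetical setup step that can fail */
    	if (ret) {
    		/* Undo only what _init() set up, e.g. the reservation object. */
    		drm_gem_private_object_fini(obj);
    		return ret;
    	}

    	return 0;
    }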
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index fe09e5be79bd..15d04a0ec623 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	init_completion(&entity->entity_idle);
 
 	/* We start in an idle state. */
-	complete(&entity->entity_idle);
+	complete_all(&entity->entity_idle);
 
 	spin_lock_init(&entity->rq_lock);
 	spsc_queue_init(&entity->job_queue);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 31f3a1267be4..fd22d753b4ed 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
 		sched_job = drm_sched_entity_pop_job(entity);
 
 		if (!sched_job) {
-			complete(&entity->entity_idle);
+			complete_all(&entity->entity_idle);
 			continue;
 		}
 
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
 
 		trace_drm_run_job(sched_job, entity);
 		fence = sched->ops->run_job(sched_job);
-		complete(&entity->entity_idle);
+		complete_all(&entity->entity_idle);
 		drm_sched_fence_scheduled(s_fence);
 
 		if (!IS_ERR_OR_NULL(fence)) {
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index dec678f72a42..c730253ab85c 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -332,19 +332,3 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
-
-void fb_deferred_io_schedule_flush(struct fb_info *info)
-{
-	struct fb_deferred_io *fbdefio = info->fbdefio;
-
-	if (WARN_ON_ONCE(!fbdefio))
-		return; /* bug in driver logic */
-
-	/*
-	 * There's no requirement from callers to schedule the
-	 * flush immediately. Rather schedule the worker with a
-	 * delay and let a few more writes pile up.
-	 */
-	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
-}
-EXPORT_SYMBOL_GPL(fb_deferred_io_schedule_flush);
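The scheduler switch from complete() to complete_all() matters because entity_idle can have more than one waiter: complete() adds a single token that one waiter consumes, so a second wait_for_completion() could block on an entity that is in fact idle, while complete_all() releases every current and future waiter until the completion is reinitialized. A standalone illustration of the two semantics, not scheduler code:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(idle);

    static void waiter(void)
    {
    	/*
    	 * After complete(&idle): exactly one waiter returns per call.
    	 * After complete_all(&idle): all waiters, current and future,
    	 * return until reinit_completion(&idle) re-arms the completion.
    	 */
    	wait_for_completion(&idle);
    }

    static void mark_idle(void)
    {
    	complete_all(&idle);
    }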