author     Dave Airlie <airlied@redhat.com>  2020-11-06 14:37:56 +1000
committer  Dave Airlie <airlied@redhat.com>  2020-11-06 15:00:01 +1000
commit     c0f98d2f8b076bf3e3183aa547395f919c943a14 (patch)
tree       943ca640139742367c9b22aaec0c9f7b67ab631e
parent     e047c7be173caab95f3876ab30c03ebcf654c300 (diff)
parent     24e146cdf9f5a8fb464dd98ba8357d662d37d22f (diff)
Merge tag 'drm-misc-next-2020-11-05' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.11:

UAPI Changes:

Cross-subsystem Changes:
- arch/arm64: Describe G12b GPU as coherent
- iommu: Support coherency for Mali LPAE

Core Changes:
- atomic: Pass full state to CRTC atomic_{check, begin, flush}(); Use atomic-state pointers
- drm: Remove SCATTERLIST_MAX_SEGMENT; Cleanups
- doc: Document legacy_cursor_update better; cleanups
- edid: Don't warn on EDIDs of zero
- ttm: New backend allocation pool; Remove old page allocator; Rework no_retry handling; Replace flags with booleans in struct ttm_operation_ctx
- vram-helper: Cleanups
- fbdev: Cleanups
- console: Store font size as unsigned value

Driver Changes:
- ast: Support new display mode
- amdgpu: Switch to new TTM allocator
- hisilicon: Cleanups
- nouveau: Switch to new TTM allocator; Fix include of swiotlb.h and limits.h; Use state helper instead of CRTC state pointer
- panfrost: Support cache-coherent integrations; Fix mutex corruption on open/close; Cleanups
- qxl: Cleanups
- radeon: Switch to new TTM allocator
- tilcdc: Fix build failure
- vmwgfx: Switch to new TTM allocator
- xlnx: Use dma_request_chan
- fbdev/sh_mobile: Cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201105101641.GA13099@linux-uq9g
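The largest cross-driver change in this pull is the CRTC atomic helper signature conversion: atomic_check(), atomic_begin() and atomic_flush() now receive the full struct drm_atomic_state instead of a single struct drm_crtc_state, and drivers fetch their own CRTC state from it with the drm_atomic_get_new_crtc_state()/drm_atomic_get_old_crtc_state() accessors. A minimal sketch of the new pattern follows; foo_crtc_atomic_check() is a hypothetical driver hook, not code from this series, but the accessors it calls are the ones used throughout the diff below.

#include <drm/drm_atomic.h>

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	/* Look up this CRTC's new state from the full atomic state. */
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->enable)
		return 0; /* nothing to validate while the CRTC is off */

	/* ... driver-specific checks against crtc_state ... */

	return drm_atomic_add_affected_planes(state, crtc);
}

The same pattern applies to atomic_begin()/atomic_flush(), which use drm_atomic_get_old_crtc_state() where the old CRTC state is needed.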
-rw-r--r--  Documentation/gpu/todo.rst | 4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b.dtsi | 4
-rw-r--r--  drivers/gpu/drm/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 52
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 17
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h | 2
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_bridge_connector.c | 2
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 4
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 6
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_scatterlist.h | 2
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-crtc.c | 4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 10
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 36
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 27
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_kms.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 13
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 56
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c | 13
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 10
-rw-r--r--  drivers/gpu/drm/tidss/tidss_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 1176
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1226
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 667
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_set_memory.h | 84
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 16
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_mode.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 10
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 4
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 36
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.c | 12
-rw-r--r--  drivers/gpu/drm/zte/zx_vou.c | 2
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 11
-rw-r--r--  drivers/video/console/sticore.c | 2
-rw-r--r--  drivers/video/fbdev/atafb.c | 8
-rw-r--r--  drivers/video/fbdev/sh_mobile_lcdcfb.c | 1
-rw-r--r--  drivers/video/hdmi.c | 8
-rw-r--r--  include/drm/drm_atomic.h | 12
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 9
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 15
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 16
-rw-r--r--  include/drm/ttm/ttm_caching.h | 2
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h | 122
-rw-r--r--  include/drm/ttm/ttm_pool.h | 91
-rw-r--r--  include/drm/ttm/ttm_tt.h | 2
-rw-r--r--  include/linux/font.h | 2
-rw-r--r--  include/linux/scatterlist.h | 6
-rw-r--r--  tools/testing/scatterlist/main.c | 2
99 files changed, 1189 insertions, 3065 deletions
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 700637e25ecd..6b224ef14455 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -105,6 +105,10 @@ converted over to the new infrastructure.
One issue with the helpers is that they require that drivers handle completion
events for atomic commits correctly. But fixing these bugs is good anyway.
+Somewhat related is the legacy_cursor_update hack, which should be replaced with
+the new atomic_async_check/commit functionality in the helpers in drivers that
+still look at that flag.
+
Contact: Daniel Vetter, respective driver maintainers
Level: Advanced
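As background for the todo entry added above: the async path the text refers to lives in struct drm_plane_helper_funcs. Below is a rough, hypothetical sketch (assuming the contemporaneous 5.10-era hook signatures; the foo_* names are illustrative and not part of this series) of the atomic_async_check/atomic_async_update pair a driver would implement instead of peeking at legacy_cursor_update.

#include <drm/drm_modeset_helper_vtables.h>

static int foo_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	/* Only take the fast path for simple updates such as cursor moves;
	 * reject anything that needs a full commit (no CRTC, no current FB).
	 */
	if (!new_state->crtc || !plane->state->fb)
		return -EINVAL;
	return 0;
}

static void foo_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_plane_state *new_state)
{
	/* Apply the new position directly, bypassing the full commit machinery. */
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	/* ... program the cursor position/address registers here ... */
}

static const struct drm_plane_helper_funcs foo_cursor_plane_helper_funcs = {
	.atomic_async_check = foo_plane_atomic_async_check,
	.atomic_async_update = foo_plane_atomic_async_update,
};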
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
index 9b8548e5f6e5..ee8fcae9f9f0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
@@ -135,3 +135,7 @@
};
};
};
+
+&mali {
+ dma-coherent;
+};
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 32257189e09b..64376dd298ed 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -182,13 +182,6 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_DMA_PAGE_POOL
- bool
- depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
- default y
- help
- Choose this if you need the TTM dma page pool
-
config DRM_VRAM_HELPER
tristate
depends on DRM
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d50b63a93d37..8466558d0d93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -404,8 +404,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .resv = bo->tbo.base.resv,
- .flags = 0
+ .resv = bo->tbo.base.resv
};
uint32_t domain;
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1aa516429c80..e1f64ef8c765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -516,9 +516,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = bp->no_wait_gpu,
- .resv = bp->resv,
- .flags = bp->type != ttm_bo_type_kernel ?
- TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
+ /* We opt to avoid OOM on system pages allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = bp->type != ttm_bo_type_kernel,
+ .resv = bp->resv
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ddb1c8e9eea4..c01c060e4ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,7 +47,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
@@ -1389,15 +1388,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
return 0;
}
-#ifdef CONFIG_SWIOTLB
- if (adev->need_swiotlb && swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
- }
-#endif
-
- /* fall back to generic helper to populate the page array
- * and map them to the device */
- return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+ return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}
/**
@@ -1406,7 +1397,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
@@ -1431,16 +1423,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
return;
adev = amdgpu_ttm_adev(bdev);
-
-#ifdef CONFIG_SWIOTLB
- if (adev->need_swiotlb && swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, adev->dev);
- return;
- }
-#endif
-
- /* fall back to generic helper to unmap and unpopulate array */
- ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
+ return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}
/**
@@ -1920,10 +1903,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev,
- &amdgpu_bo_driver,
+ r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
+ adev->need_swiotlb,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1931,9 +1914,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
adev->mman.initialized = true;
- /* We opt to avoid OOM on system pages allocations */
- adev->mman.bdev.no_retry = true;
-
/* Initialize VRAM pool with all of VRAM divided into pages */
r = amdgpu_vram_mgr_init(adev);
if (r) {
@@ -2353,16 +2333,22 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
return 0;
}
+static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+
static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+ {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
};
/**
@@ -2655,12 +2641,6 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
}
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-
-#ifdef CONFIG_SWIOTLB
- if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
- --count;
-#endif
-
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e2b23486ba4c..86fd4420f128 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5514,17 +5514,19 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc *dc = adev->dm.dc;
- struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
int ret = -EINVAL;
- dm_update_crtc_active_planes(crtc, state);
+ dm_update_crtc_active_planes(crtc, crtc_state);
if (unlikely(!dm_crtc_state->stream &&
- modeset_required(state, NULL, dm_crtc_state->stream))) {
+ modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
WARN_ON(1);
return ret;
}
@@ -5535,8 +5537,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
* planes are disabled, which is not supported by the hardware. And there is legacy
* userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
*/
- if (state->enable &&
- !(state->plane_mask & drm_plane_mask(crtc->primary)))
+ if (crtc_state->enable &&
+ !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
return -EINVAL;
/* In some use cases, like reset, no stream is attached */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index a4bbf56a7fc1..df0b9eeb8933 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -74,16 +74,18 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
*/
static int
komeda_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
- struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_state);
int err;
- if (drm_atomic_crtc_needs_modeset(state))
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
komeda_crtc_update_clock_ratio(kcrtc_st);
- if (state->active) {
+ if (crtc_state->active) {
err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
if (err)
return err;
@@ -383,8 +385,10 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
static void
komeda_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+ crtc);
/* commit with modeset will be handled in enable/disable */
if (drm_atomic_crtc_needs_modeset(crtc->state))
return;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 84ac10d59485..a3234bfb0917 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -205,7 +205,7 @@ static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
}
static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct drm_pending_vblank_event *event = crtc->state->event;
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 49766eb7a554..108e7a31bd26 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -337,8 +337,10 @@ mclk_calc:
}
static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct drm_plane *plane;
@@ -373,7 +375,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
*/
/* first count the number of rotated planes */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct drm_framebuffer *fb = pstate->fb;
if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier)
@@ -389,7 +391,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
rot_mem_free += hwdev->rotation_memory[1];
/* now validate the rotation memory requirements */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
@@ -417,18 +419,18 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
}
/* If only the writeback routing has changed, we don't need a modeset */
- if (state->connectors_changed) {
+ if (crtc_state->connectors_changed) {
u32 old_mask = crtc->state->connector_mask;
- u32 new_mask = state->connector_mask;
+ u32 new_mask = crtc_state->connector_mask;
if ((old_mask ^ new_mask) ==
(1 << drm_connector_index(&malidp->mw_connector.base)))
- state->connectors_changed = false;
+ crtc_state->connectors_changed = false;
}
- ret = malidp_crtc_atomic_check_gamma(crtc, state);
- ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
- ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
+ ret = malidp_crtc_atomic_check_gamma(crtc, crtc_state);
+ ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, crtc_state);
+ ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, crtc_state);
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e0fbfc9ce386..ca643f4e2064 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -413,21 +413,23 @@ static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- if (state->gamma_lut && drm_color_lut_size(state->gamma_lut) != 256)
+ if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != 256)
return -EINVAL;
- if (state->color_mgmt_changed)
- state->planes_changed = true;
+ if (crtc_state->color_mgmt_changed)
+ crtc_state->planes_changed = true;
return 0;
}
static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
@@ -441,7 +443,7 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index bd03a8a67e3a..22f0e65fbe9a 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -751,24 +751,26 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
bool succ;
- if (!state->enable)
+ if (!crtc_state->enable)
return 0; /* no mode checks if CRTC is being disabled */
- ast_state = to_ast_crtc_state(state);
+ ast_state = to_ast_crtc_state(crtc_state);
format = ast_state->format;
if (drm_WARN_ON_ONCE(dev, !format))
return -EINVAL; /* BUG: We didn't set format in primary check(). */
- succ = ast_get_vbios_mode_info(format, &state->mode,
- &state->adjusted_mode,
+ succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
+ &crtc_state->adjusted_mode,
&ast_state->vbios_mode_info);
if (!succ)
return -EINVAL;
@@ -777,8 +779,11 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
static void
-ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct ast_private *ast = to_ast_private(crtc->dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index d665dd5af5dd..3d013946714e 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -282,6 +282,8 @@ static const struct ast_vbios_enhtable res_1360x768[] = {
};
static const struct ast_vbios_enhtable res_1600x900[] = {
+ {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */
+ (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A },
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT), 60, 1, 0x3A },
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 2b3888df22f8..c17571a3cc2b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -325,8 +325,9 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
}
static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
- struct drm_crtc_state *s)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *s = drm_atomic_get_new_crtc_state(state, c);
int ret;
ret = atmel_hlcdc_crtc_select_output_mode(s);
@@ -341,7 +342,7 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
}
static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
- struct drm_crtc_state *old_s)
+ struct drm_atomic_state *state)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -356,7 +357,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
}
static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_s)
+ struct drm_atomic_state *state)
{
/* TODO: write common plane control register if available */
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index a7bcb4b4586c..ddd0e3239150 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -918,7 +918,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_check)
continue;
- ret = funcs->atomic_check(crtc, new_crtc_state);
+ ret = funcs->atomic_check(crtc, state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.id, crtc->name);
@@ -2521,7 +2521,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_begin(crtc, old_crtc_state);
+ funcs->atomic_begin(crtc, old_state);
}
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
@@ -2579,7 +2579,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
if (active_only && !new_crtc_state->active)
continue;
- funcs->atomic_flush(crtc, old_crtc_state);
+ funcs->atomic_flush(crtc, old_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -2617,7 +2617,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->atomic_begin)
- crtc_funcs->atomic_begin(crtc, old_crtc_state);
+ crtc_funcs->atomic_begin(crtc, old_state);
drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
struct drm_plane_state *old_plane_state =
@@ -2643,7 +2643,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
}
if (crtc_funcs && crtc_funcs->atomic_flush)
- crtc_funcs->atomic_flush(crtc, old_crtc_state);
+ crtc_funcs->atomic_flush(crtc, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index a58cbde59c34..791379816837 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -241,7 +241,7 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
goto no_edid;
edid = bridge->funcs->get_edid(bridge, connector);
- if (!edid || !drm_edid_is_valid(edid)) {
+ if (!drm_edid_is_valid(edid)) {
kfree(edid);
goto no_edid;
}
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 138ff34b31db..3bcabc2f6e0e 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -97,12 +97,12 @@
* &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
* are set up by calling drm_plane_create_color_properties().
*
- * "COLOR_ENCODING"
+ * "COLOR_ENCODING":
* Optional plane enum property to support different non RGB
* color encodings. The driver can provide a subset of standard
* enum values supported by the DRM plane.
*
- * "COLOR_RANGE"
+ * "COLOR_RANGE":
* Optional plane enum property to support different non RGB
* color parameter ranges. The driver can provide a subset of
* standard enum values supported by the DRM plane.
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 631125b46e04..c7363af731b4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1844,7 +1844,7 @@ static void connector_bad_edid(struct drm_connector *connector,
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
- drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name);
+ drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
for (i = 0; i < num_blocks; i++) {
u8 *block = edid + i * EDID_LENGTH;
char prefix[20];
@@ -1856,7 +1856,7 @@ static void connector_bad_edid(struct drm_connector *connector,
else
sprintf(prefix, "\t[%02x] GOOD ", i);
- print_hex_dump(KERN_WARNING,
+ print_hex_dump(KERN_DEBUG,
prefix, DUMP_PREFIX_NONE, 16, 1,
block, EDID_LENGTH, false);
}
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 9da823eb0edd..16d68c04ea5d 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -15,7 +15,6 @@
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -1045,10 +1044,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
vmm->vram_base = vram_base;
vmm->vram_size = vram_size;
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
- true);
+ false, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 187b55ede62e..a7b61c2d9190 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -820,8 +820,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
if (dev)
max_segment = dma_max_mapping_size(dev->dev);
- if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
- max_segment = SCATTERLIST_MAX_SEGMENT;
+ if (max_segment == 0)
+ max_segment = UINT_MAX;
sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
nr_pages << PAGE_SHIFT,
max_segment,
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index fa87b63e152a..743e57c1b44f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -86,16 +86,18 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- bool has_primary = state->plane_mask &
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ bool has_primary = crtc_state->plane_mask &
drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
- if (has_primary != state->enable)
+ if (has_primary != crtc_state->enable)
return -EINVAL;
- return drm_atomic_add_affected_planes(state->state, crtc);
+ return drm_atomic_add_affected_planes(state, crtc);
}
static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 35f1d1dbb126..4153f302de7c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -49,21 +49,23 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- if (!state->enable)
+ if (!crtc_state->enable)
return 0;
if (exynos_crtc->ops->atomic_check)
- return exynos_crtc->ops->atomic_check(exynos_crtc, state);
+ return exynos_crtc->ops->atomic_check(exynos_crtc, crtc_state);
return 0;
}
static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -72,7 +74,7 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 7a9e89cfdf9c..2af60d98f48f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -21,7 +21,7 @@
#include "fsl_dcu_drm_plane.h"
static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index a1eabadf5adb..ea962acfeae0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -139,7 +139,7 @@ static const u32 channel_formats1[] = {
DRM_FORMAT_ABGR8888
};
-static struct drm_plane_funcs hibmc_plane_funcs = {
+static const struct drm_plane_funcs hibmc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
@@ -390,7 +390,7 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
u32 reg;
struct drm_device *dev = crtc->dev;
@@ -410,7 +410,7 @@ static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
unsigned long flags;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 0c1b40d25ac4..fee6fe810e74 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -369,7 +369,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
drm_dev_put(dev);
}
-static struct pci_device_id hibmc_pci_table[] = {
+static const struct pci_device_id hibmc_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0x1711) },
{0,}
};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index cfe8ff596d55..d84d41f3e78f 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -485,7 +485,7 @@ static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
@@ -498,7 +498,7 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b7b59328cb76..883dd8d09d6b 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -112,7 +112,7 @@ static inline unsigned int i915_sg_segment_size(void)
unsigned int size = swiotlb_max_segment();
if (size == 0)
- return SCATTERLIST_MAX_SEGMENT;
+ size = UINT_MAX;
size = rounddown(size, PAGE_SIZE);
/* swiotlb_max_segment_size can return 1 byte when it means one page. */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
index 8f570eb5f471..31267c00782f 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
@@ -53,13 +53,13 @@ static const struct drm_crtc_funcs dcss_crtc_funcs = {
};
static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
}
static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7ecc27c41a6a..7ebd99ee3240 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -227,24 +227,26 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
}
static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
u32 primary_plane_mask = drm_plane_mask(crtc->primary);
- if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+ if (crtc_state->active && (primary_plane_mask & crtc_state->plane_mask) == 0)
return -EINVAL;
return 0;
}
static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
}
static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 2329754af116..b9c156e13156 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -239,28 +239,33 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
}
static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
- if (state->gamma_lut &&
- drm_color_lut_size(state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
+ if (crtc_state->gamma_lut &&
+ drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
dev_dbg(priv->dev, "Invalid palette size\n");
return -EINVAL;
}
- if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) {
- f1_state = drm_atomic_get_plane_state(state->state, &priv->f1);
+ if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) {
+ f1_state = drm_atomic_get_plane_state(crtc_state->state,
+ &priv->f1);
if (IS_ERR(f1_state))
return PTR_ERR(f1_state);
- f0_state = drm_atomic_get_plane_state(state->state, &priv->f0);
+ f0_state = drm_atomic_get_plane_state(crtc_state->state,
+ &priv->f0);
if (IS_ERR(f0_state))
return PTR_ERR(f0_state);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && priv->ipu_plane) {
- ipu_state = drm_atomic_get_plane_state(state->state, priv->ipu_plane);
+ ipu_state = drm_atomic_get_plane_state(crtc_state->state,
+ priv->ipu_plane);
if (IS_ERR(ipu_state))
return PTR_ERR(ipu_state);
@@ -298,7 +303,7 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
}
static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *oldstate)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
u32 ctrl = 0;
@@ -318,26 +323,27 @@ static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *oldstate)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
- struct drm_crtc_state *state = crtc->state;
- struct drm_pending_vblank_event *event = state->event;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_pending_vblank_event *event = crtc_state->event;
- if (drm_atomic_crtc_needs_modeset(state)) {
- ingenic_drm_crtc_update_timings(priv, &state->mode);
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ ingenic_drm_crtc_update_timings(priv, &crtc_state->mode);
priv->update_clk_rate = true;
}
if (priv->update_clk_rate) {
mutex_lock(&priv->clk_mutex);
- clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000);
+ clk_set_rate(priv->pix_clk,
+ crtc_state->adjusted_mode.clock * 1000);
priv->update_clk_rate = false;
mutex_unlock(&priv->clk_mutex);
}
if (event) {
- state->event = NULL;
+ crtc_state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index c28f5d7aac1a..23f5c10b0c67 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -575,24 +575,24 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
- struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
+ struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state);
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- if (mtk_crtc->event && state->base.event)
+ if (mtk_crtc->event && crtc_state->base.event)
DRM_ERROR("new event while there is still a pending event\n");
- if (state->base.event) {
- state->base.event->pipe = drm_crtc_index(crtc);
+ if (crtc_state->base.event) {
+ crtc_state->base.event->pipe = drm_crtc_index(crtc);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- mtk_crtc->event = state->base.event;
- state->base.event = NULL;
+ mtk_crtc->event = crtc_state->base.event;
+ crtc_state->base.event = NULL;
}
}
static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 247ce085886b..d70616da8ce2 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -201,7 +201,7 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
@@ -217,7 +217,7 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 6a24ce245a37..e55be2922c2f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -486,7 +486,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
}
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
@@ -527,7 +527,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct dpu_crtc *dpu_crtc;
struct drm_device *dev;
@@ -815,10 +815,12 @@ struct plane_state {
};
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
struct plane_state *pstates;
const struct drm_plane_state *pstate;
@@ -835,32 +837,33 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
- if (!state->enable || !state->active) {
+ if (!crtc_state->enable || !crtc_state->active) {
DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
- crtc->base.id, state->enable, state->active);
+ crtc->base.id, crtc_state->enable,
+ crtc_state->active);
goto end;
}
- mode = &state->adjusted_mode;
+ mode = &crtc_state->adjusted_mode;
DPU_DEBUG("%s: check", dpu_crtc->name);
/* force a full mode set if active state changed */
- if (state->active_changed)
- state->mode_changed = true;
+ if (crtc_state->active_changed)
+ crtc_state->mode_changed = true;
memset(pipe_staged, 0, sizeof(pipe_staged));
if (cstate->num_mixers) {
mixer_width = mode->hdisplay / cstate->num_mixers;
- _dpu_crtc_setup_lm_bounds(crtc, state);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
}
crtc_rect.x2 = mode->hdisplay;
crtc_rect.y2 = mode->vdisplay;
/* get plane state for all drm planes associated with crtc state */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
struct drm_rect dst, clip = crtc_rect;
if (IS_ERR_OR_NULL(pstate)) {
@@ -966,7 +969,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
- rc = dpu_core_perf_crtc_check(crtc, state);
+ rc = dpu_core_perf_crtc_check(crtc, crtc_state);
if (rc) {
DPU_ERROR("crtc%d failed performance check %d\n",
crtc->base.id, rc);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6b03ceeb5ba1..34e3186e236d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -307,7 +307,7 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
}
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: check", mdp4_crtc->name);
@@ -316,14 +316,14 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
}
static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
DBG("%s: begin", mdp4_crtc->name);
}
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 747dd8a7aa6e..4a53d7b42e9c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -7,6 +7,7 @@
#include <linux/sort.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
@@ -682,15 +683,17 @@ static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
- const struct drm_display_mode *mode = &state->adjusted_mode;
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
bool cursor_plane = false;
bool need_right_mixer = false;
int cnt = 0, i;
@@ -699,7 +702,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", crtc->name);
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
if (!pstate->visible)
continue;
@@ -731,7 +734,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
if (mode->hdisplay > hw_cfg->lm.max_width)
need_right_mixer = true;
- ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
+ ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
if (ret) {
DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
@@ -744,7 +747,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
WARN_ON(cursor_plane &&
(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
- start = get_start_stage(crtc, state, &pstates[0].state->base);
+ start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
@@ -769,13 +772,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
DBG("%s: begin", crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 956f631997f2..9040835289a8 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -269,21 +269,23 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
}
static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- bool has_primary = state->plane_mask &
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ bool has_primary = crtc_state->plane_mask &
drm_plane_mask(crtc->primary);
/* The primary plane has to be enabled when the CRTC is active. */
- if (state->active && !has_primary)
+ if (crtc_state->active && !has_primary)
return -EINVAL;
/* TODO: Is this needed ? */
- return drm_atomic_add_affected_planes(state->state, crtc);
+ return drm_atomic_add_affected_planes(state, crtc);
}
static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 841edfaf5b9d..537c1ef2e464 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -30,6 +30,7 @@
#include <nvif/event.h>
#include <nvif/cl0046.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
@@ -310,12 +311,16 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
}
static int
-nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
- struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
- struct nv50_head_atom *asyh = nv50_head_atom(state);
+ struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nouveau_conn_atom *asyc = NULL;
struct drm_connector_state *conns;
struct drm_connector *conn;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 75fddbcd7832..8133377d865d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,7 +28,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
#include "nouveau_drv.h"
#include "nouveau_chan.h"
@@ -1327,25 +1326,13 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
-#if IS_ENABLED(CONFIG_AGP)
- if (drm->agp.bridge) {
- return ttm_pool_populate(ttm, ctx);
- }
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev, ctx);
- }
-#endif
- return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
+ return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
- struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1356,21 +1343,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
-#if IS_ENABLED(CONFIG_AGP)
- if (drm->agp.bridge) {
- ttm_pool_unpopulate(ttm);
- return;
- }
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate((void *)ttm, dev);
- return;
- }
-#endif
-
- ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+ return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b8025507a9e4..9d04d1b36434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -56,7 +56,6 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drm_audio_component.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 0592ed6eaad1..a37bc3d7b38b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -22,6 +22,10 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+
+#include <linux/limits.h>
+#include <linux/swiotlb.h>
+
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
@@ -281,6 +285,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
+ bool need_swiotlb = false;
int typei, ret;
ret = nouveau_ttm_init_host(drm, 0);
@@ -315,11 +320,14 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
- ret = ttm_bo_device_init(&drm->ttm.bdev,
- &nouveau_bo_driver,
- dev->anon_inode->i_mapping,
- dev->vma_offset_manager,
- drm->client.mmu.dmabits <= 32 ? true : false);
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
+ need_swiotlb = !!swiotlb_nr_tbl();
+#endif
+
+ ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
+ drm->dev->dev, dev->anon_inode->i_mapping,
+ dev->vma_offset_manager, need_swiotlb,
+ drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index fef3b0032fd8..49621b2e1ab5 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -569,22 +569,25 @@ static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct drm_plane_state *pri_state;
- if (state->color_mgmt_changed && state->gamma_lut) {
- unsigned int length = state->gamma_lut->length /
+ if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
+ unsigned int length = crtc_state->gamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
return -EINVAL;
}
- pri_state = drm_atomic_get_new_plane_state(state->state, crtc->primary);
+ pri_state = drm_atomic_get_new_plane_state(state,
+ crtc->primary);
if (pri_state) {
struct omap_crtc_state *omap_crtc_state =
- to_omap_crtc_state(state);
+ to_omap_crtc_state(crtc_state);
/* Mirror new values for zpos and rotation in omap_crtc_state */
omap_crtc_state->zpos = pri_state->zpos;
@@ -598,12 +601,12 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
}
static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index ea8d31863c50..1daf9322954a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -18,7 +18,7 @@
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
- pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+ pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev);
if (IS_ERR(pfdev->rstc)) {
dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 2e9cbd1c4a58..140e004a3790 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -88,6 +88,7 @@ struct panfrost_device {
/* pm_domains for devices with more than one. */
struct device *pm_domain_devs[MAX_PM_DOMAINS];
struct device_link *pm_domain_links[MAX_PM_DOMAINS];
+ bool coherent;
struct panfrost_features features;
const struct panfrost_compatible *comp;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 37d4cb7a5491..6e5dedacb777 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev)
if (!pfdev->comp)
return -ENODEV;
+ pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
+
+
	/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 1a6cea0e0bd7..fb9f7334ce18 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -220,6 +220,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
+ struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -229,6 +230,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
+ obj->base.map_cached = pfdev->coherent;
return &obj->base.base;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index d0469e944143..e75b7d2192f7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -554,6 +554,8 @@ int panfrost_job_init(struct panfrost_device *pfdev)
}
for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ mutex_init(&js->queue[j].lock);
+
js->queue[j].fence_context = dma_fence_context_alloc(1);
ret = drm_sched_init(&js->queue[j].sched,
@@ -584,8 +586,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
job_write(pfdev, JOB_INT_MASK, 0);
- for (j = 0; j < NUM_JOB_SLOTS; j++)
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
drm_sched_fini(&js->queue[j].sched);
+ mutex_destroy(&js->queue[j].lock);
+ }
}
@@ -597,7 +601,6 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
int ret, i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- mutex_init(&js->queue[i].lock);
sched = &js->queue[i].sched;
ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -610,14 +613,10 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
- struct panfrost_job_slot *js = pfdev->js;
int i;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
- mutex_destroy(&js->queue[i].lock);
- }
}
int panfrost_job_is_idle(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 776448c527ea..be8d68fb0e11 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
.pgsize_bitmap = SZ_4K | SZ_2M,
.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
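Taken together, the Panfrost hunks above plumb a single coherency bit from the firmware description down to the GPU page tables. A condensed, illustrative view follows; this is not a literal driver function, and the probe-time and per-object assignments happen in the separate places shown above (io_pgtable_cfg here is a placeholder name):

	/* at probe: remember whether the GPU integration is IO-coherent */
	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* per GEM object: coherent devices may keep CPU mappings cached */
	obj->base.map_cached = pfdev->coherent;

	/* io-pgtable config: page-table walks are coherent as well */
	io_pgtable_cfg.coherent_walk = pfdev->coherent;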
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 45fd76e04bdc..07a3e3c23f09 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -372,7 +372,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
}
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "flush");
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 9609eeb52821..a80d59634143 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -32,7 +32,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_drv.h"
@@ -194,11 +193,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No other users of address space so set it to 0 */
- r = ttm_bo_device_init(&qdev->mman.bdev,
- &qxl_bo_driver,
+ r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
qdev->ddev.anon_inode->i_mapping,
qdev->ddev.vma_offset_manager,
- false);
+ false, false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0a6d7ea847db..95038ac3382e 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -47,7 +47,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include "radeon_reg.h"
@@ -679,19 +678,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
return 0;
}
-#if IS_ENABLED(CONFIG_AGP)
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_pool_populate(ttm, ctx);
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
- }
-#endif
-
- return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
+ return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}
static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
@@ -709,21 +696,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
if (slave)
return;
-#if IS_ENABLED(CONFIG_AGP)
- if (rdev->flags & RADEON_IS_AGP) {
- ttm_pool_unpopulate(ttm);
- return;
- }
-#endif
-
-#ifdef CONFIG_SWIOTLB
- if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
- return;
- }
-#endif
-
- ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
+ return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}
int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
@@ -846,10 +819,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
int r;
/* No other users of address space so set it to 0 */
- r = ttm_bo_device_init(&rdev->mman.bdev,
- &radeon_bo_driver,
+ r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
rdev->ddev->anon_inode->i_mapping,
rdev->ddev->vma_offset_manager,
+ rdev->need_swiotlb,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -857,6 +830,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
+ ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
+ dma_addressing_limited(&rdev->pdev->dev));
+
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
@@ -1004,6 +980,14 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
return 0;
}
+static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
+}
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
@@ -1011,10 +995,7 @@ static int ttm_pl_tt = TTM_PL_TT;
static struct drm_info_list radeon_ttm_debugfs_list[] = {
{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+ {"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
};
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
@@ -1142,11 +1123,6 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
count = ARRAY_SIZE(radeon_ttm_debugfs_list);
-#ifdef CONFIG_SWIOTLB
- if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
- --count;
-#endif
-
return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else
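With the driver-side AGP and swiotlb special cases removed, radeon's populate/unpopulate callbacks reduce to calls into the shared TTM pool, roughly as sketched below. Userptr and imported-BO handling, which the real callbacks keep, is omitted, and the example_ names are placeholders.

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_pool.h>

static int example_tt_populate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	/* allocate backing pages from the per-device pool */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void example_tt_unpopulate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
	/* give the pages back to the pool for reuse */
	ttm_pool_free(&bdev->pool, ttm);
}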
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 4c360a255849..b5fb941e0f53 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -682,20 +682,23 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
*/
static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state);
struct drm_encoder *encoder;
int ret;
- ret = rcar_du_cmm_check(crtc, state);
+ ret = rcar_du_cmm_check(crtc, crtc_state);
if (ret)
return ret;
/* Store the routes from the CRTC output to the DU outputs. */
rstate->outputs = 0;
- drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc_state->encoder_mask) {
struct rcar_du_encoder *renc;
/* Skip the writeback encoder. */
@@ -782,7 +785,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -811,7 +814,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 47835715b44b..8cd39fca81a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1246,8 +1246,10 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
}
static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct vop *vop = to_vop(crtc);
/*
@@ -1415,8 +1417,10 @@ static void vop_wait_for_irq_handler(struct vop *vop)
}
static int vop_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct vop *vop = to_vop(crtc);
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -1460,8 +1464,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
}
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_atomic_state *old_state = old_crtc_state->state;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct vop *vop = to_vop(crtc);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 5726746f6d18..409795786f03 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -133,7 +133,7 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct drm_device *drm_dev = crtc->dev;
struct sti_mixer *mixer = to_sti_mixer(crtc);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index e9af92d4a74b..3980677435cb 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -596,7 +596,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 999deb64bd70..45d9eb552d86 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -15,6 +15,7 @@
#include <video/videomode.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
@@ -45,21 +46,25 @@ static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
}
static int sun4i_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct sunxi_engine *engine = scrtc->engine;
int ret = 0;
if (engine && engine->ops && engine->ops->atomic_check)
- ret = engine->ops->atomic_check(engine, state);
+ ret = engine->ops->atomic_check(engine, crtc_state);
return ret;
}
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct sunxi_engine *engine = scrtc->engine;
@@ -79,7 +84,7 @@ static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct drm_pending_vblank_event *event = crtc->state->event;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 52acc2f8f798..2d86627b0d4e 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1918,7 +1918,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
unsigned long flags;
@@ -1937,17 +1937,17 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
- struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
- value = state->planes << 8 | GENERAL_UPDATE;
+ value = crtc_state->planes << 8 | GENERAL_UPDATE;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
- value = state->planes | GENERAL_ACT_REQ;
+ value = crtc_state->planes | GENERAL_ACT_REQ;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 848b9c7b553d..2218da3b3ca3 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -85,8 +85,10 @@ void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
/* drm_crtc_helper_funcs */
static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
struct dispc_device *dispc = tidss->dispc;
@@ -97,10 +99,10 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
dev_dbg(ddev->dev, "%s\n", __func__);
- if (!state->enable)
+ if (!crtc_state->enable)
return 0;
- mode = &state->adjusted_mode;
+ mode = &crtc_state->adjusted_mode;
ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
if (ok != MODE_OK) {
@@ -109,7 +111,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
- return dispc_vp_bus_check(dispc, hw_videoport, state);
+ return dispc_vp_bus_check(dispc, hw_videoport, crtc_state);
}
/*
@@ -161,8 +163,10 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss,
}
static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index da2ab2aa3577..30213708fc99 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -535,7 +535,7 @@ static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void tilcdc_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
if (!crtc->state->event)
return;
@@ -657,15 +657,17 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
}
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
/* If we are not active we don't care */
- if (!state->active)
+ if (!crtc_state->active)
return 0;
- if (state->state->planes[0].ptr != crtc->primary ||
- state->state->planes[0].state == NULL ||
- state->state->planes[0].state->crtc != crtc) {
+ if (state->planes[0].ptr != crtc->primary ||
+ state->planes[0].state == NULL ||
+ state->planes[0].state->crtc != crtc) {
dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c5f82e693f1a..3d7e4db756b7 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -432,8 +432,8 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
}
static struct drm_info_list tilcdc_debugfs_list[] = {
- { "regs", tilcdc_regs_show, 0 },
- { "mm", tilcdc_mm_show, 0 },
+ { "regs", tilcdc_regs_show, 0, NULL },
+ { "mm", tilcdc_mm_show, 0, NULL },
};
static void tilcdc_debugfs_init(struct drm_minor *minor)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 90c0da88cc98..b6f5f87b270f 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,9 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \
- ttm_resource.o
+ ttm_execbuf_util.o ttm_range_manager.o \
+ ttm_resource.o ttm_pool.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
-ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4f76c9287159..03c86628e4ac 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -34,7 +34,6 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f51b5e20fa17..e2a124b3affb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -637,7 +637,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
- if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
+ if (ctx->allow_res_evict)
ret = true;
*locked = false;
if (busy)
@@ -1283,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
+ ttm_pool_fini(&bdev->pool);
+
if (!ret)
ttm_bo_global_release();
@@ -1307,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
+ struct device *dev,
struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool need_dma32)
+ bool use_dma_alloc, bool use_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
@@ -1324,12 +1327,12 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->driver = driver;
ttm_bo_init_sysman(bdev);
+ ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&ttm_global_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index eeaca5d1efe3..2944fa0af493 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -315,8 +315,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .flags = TTM_OPT_FLAG_FORCE_ALLOC
-
+ .force_alloc = true
};
ttm = bo->ttm;
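The two TTM hunks above show the ttm_operation_ctx rework: the flags word and its TTM_OPT_FLAG_* bits give way to plain booleans. An illustrative declaration, limited to the fields visible in this diff:

	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true,	/* was flags = TTM_OPT_FLAG_FORCE_ALLOC */
		/* .allow_res_evict = true replaces TTM_OPT_FLAG_ALLOW_RES_EVICT */
	};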
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 69cf622e79e5..5ed1fc8f2ace 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -30,7 +30,6 @@
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -38,6 +37,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>
+#include <drm/ttm/ttm_pool.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
@@ -451,8 +451,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
- ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
- ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -465,8 +464,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
unsigned int i;
/* let the page allocator first stop the shrink work. */
- ttm_page_alloc_fini();
- ttm_dma_page_alloc_fini();
+ ttm_pool_mgr_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
@@ -544,7 +542,8 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
{
int64_t available;
- if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
+ /* We allow over commit during suspend */
+ if (ctx->force_alloc)
return false;
available = get_nr_swap_pages() + si_mem_available();
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
deleted file mode 100644
index 29e6c29ad60e..000000000000
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ /dev/null
@@ -1,1176 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- * Pauli Nieminen <suokkos@gmail.com>
- */
-
-/* simple list based uncached page pool
- * - Pool collects resently freed pages for reuse
- * - Use page->lru to keep a free list
- * - doesn't track currently in use pages
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/atomic.h>
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-
-#include "ttm_set_memory.h"
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 16
-#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define PAGE_FREE_INTERVAL 1000
-
-/**
- * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
- *
- * @lock: Protects the shared pool from concurrnet access. Must be used with
- * irqsave/irqrestore variants because pool allocator maybe called from
- * delayed work.
- * @fill_lock: Prevent concurrent calls to fill.
- * @list: Pool of free uc/wc pages for fast reuse.
- * @gfp_flags: Flags to pass for alloc_page.
- * @npages: Number of pages in pool.
- */
-struct ttm_page_pool {
- spinlock_t lock;
- bool fill_lock;
- struct list_head list;
- gfp_t gfp_flags;
- unsigned npages;
- char *name;
- unsigned long nfrees;
- unsigned long nrefills;
- unsigned int order;
-};
-
-/**
- * Limits for the pool. They are handled without locks because only place where
- * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-#define NUM_POOLS 6
-
-/**
- * struct ttm_pool_manager - Holds memory pools for fst allocation
- *
- * Manager is read only object for pool code so it doesn't need locking.
- *
- * @free_interval: minimum number of jiffies between freeing pages from pool.
- * @page_alloc_inited: reference counting for pool allocation.
- * @work: Work that is used to shrink the pool. Work is only run when there is
- * some pages to free.
- * @small_allocation: Limit in number of pages what is small allocation.
- *
- * @pools: All pool objects in use.
- **/
-struct ttm_pool_manager {
- struct kobject kobj;
- struct shrinker mm_shrink;
- struct ttm_pool_opts options;
-
- union {
- struct ttm_page_pool pools[NUM_POOLS];
- struct {
- struct ttm_page_pool wc_pool;
- struct ttm_page_pool uc_pool;
- struct ttm_page_pool wc_pool_dma32;
- struct ttm_page_pool uc_pool_dma32;
- struct ttm_page_pool wc_pool_huge;
- struct ttm_page_pool uc_pool_huge;
- } ;
- };
-};
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj,
- struct attribute *attr, const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max)
- m->options.max_size = val;
- else if (attr == &ttm_page_pool_small)
- m->options.small = val;
- else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-static struct ttm_pool_manager *_manager;
-
-/**
- * Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
- enum ttm_caching cstate)
-{
- int pool_index;
-
- if (cstate == ttm_cached)
- return NULL;
-
- if (cstate == ttm_write_combined)
- pool_index = 0x0;
- else
- pool_index = 0x1;
-
- if (flags & TTM_PAGE_FLAG_DMA32) {
- if (huge)
- return NULL;
- pool_index |= 0x2;
-
- } else if (huge) {
- pool_index |= 0x4;
- }
-
- return &_manager->pools[pool_index];
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_pages_put(struct page *pages[], unsigned npages,
- unsigned int order)
-{
- unsigned int i, pages_nr = (1 << order);
-
- if (order == 0) {
- if (ttm_set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
- }
-
- for (i = 0; i < npages; ++i) {
- if (order > 0) {
- if (ttm_set_pages_wb(pages[i], pages_nr))
- pr_err("Failed to set %d pages to wb!\n", pages_nr);
- }
- __free_pages(pages[i], order);
- }
-}
-
-static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
- unsigned freed_pages)
-{
- pool->npages -= freed_pages;
- pool->nfrees += freed_pages;
-}
-
-/**
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * number of pages in one go.
- *
- * @pool: to free the pages from
- * @free_all: If set to true will free all pages in pool
- * @use_static: Safe to use static buffer
- **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
- bool use_static)
-{
- static struct page *static_buf[NUM_PAGES_TO_ALLOC];
- unsigned long irq_flags;
- struct page *p;
- struct page **pages_to_free;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- if (use_static)
- pages_to_free = static_buf;
- else
- pages_to_free = kmalloc_array(npages_to_free,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!pages_to_free) {
- pr_debug("Failed to allocate memory for pool free operation\n");
- return 0;
- }
-
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
- list_for_each_entry_reverse(p, &pool->list, lru) {
- if (freed_pages >= npages_to_free)
- break;
-
- pages_to_free[freed_pages++] = p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
- /* remove range of pages from the pool */
- __list_del(p->lru.prev, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
- /**
- * Because changing page caching is costly
- * we unlock the pool to prevent stalling.
- */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_pages_put(pages_to_free, freed_pages, pool->order);
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
- /* free all so restart the processing */
- if (nr_free)
- goto restart;
-
- /* Not allowed to fall through or break because
- * following context is inside spinlock while we are
- * outside here.
- */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- __list_del(&p->lru, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_pages_put(pages_to_free, freed_pages, pool->order);
-out:
- if (pages_to_free != static_buf)
- kfree(pages_to_free);
- return nr_free;
-}
-
-/**
- * Callback for mm to request pool to reduce number of page held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * This code is crying out for a shrinker per pool....
- */
-static unsigned long
-ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- static DEFINE_MUTEX(lock);
- static unsigned start_pool;
- unsigned i;
- unsigned pool_offset;
- struct ttm_page_pool *pool;
- int shrink_pages = sc->nr_to_scan;
- unsigned long freed = 0;
- unsigned int nr_free_pool;
-
- if (!mutex_trylock(&lock))
- return SHRINK_STOP;
- pool_offset = ++start_pool % NUM_POOLS;
- /* select start pool in round robin fashion */
- for (i = 0; i < NUM_POOLS; ++i) {
- unsigned nr_free = shrink_pages;
- unsigned page_nr;
-
- if (shrink_pages == 0)
- break;
-
- pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- page_nr = (1 << pool->order);
- /* OK to use static buffer since global mutex is held. */
- nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
- shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
- freed += (nr_free_pool - shrink_pages) << pool->order;
- if (freed >= sc->nr_to_scan)
- break;
- shrink_pages <<= pool->order;
- }
- mutex_unlock(&lock);
- return freed;
-}
-
-
-static unsigned long
-ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- unsigned i;
- unsigned long count = 0;
- struct ttm_page_pool *pool;
-
- for (i = 0; i < NUM_POOLS; ++i) {
- pool = &_manager->pools[i];
- count += (pool->npages << pool->order);
- }
-
- return count;
-}
-
-static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.count_objects = ttm_pool_shrink_count;
- manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
- manager->mm_shrink.seeks = 1;
- return register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-static int ttm_set_pages_caching(struct page **pages,
- enum ttm_caching cstate, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- switch (cstate) {
- case ttm_uncached:
- r = ttm_set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("Failed to set %d pages to uc!\n", cpages);
- break;
- case ttm_write_combined:
- r = ttm_set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("Failed to set %d pages to wc!\n", cpages);
- break;
- default:
- break;
- }
- return r;
-}
-
-/**
- * Free pages the pages that failed to change the caching state. If there is
- * any pages that have changed their caching state already put them to the
- * pool.
- */
-static void ttm_handle_caching_failure(struct page **failed_pages,
- unsigned cpages)
-{
- unsigned i;
-
- /* Failed pages have to be freed */
- for (i = 0; i < cpages; ++i) {
- list_del(&failed_pages[i]->lru);
- __free_page(failed_pages[i]);
- }
-}
-
-/**
- * Allocate new pages with correct caching.
- *
- * This function is reentrant if caller updates count depending on number of
- * pages returned in pages array.
- */
-static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching cstate,
- unsigned count, unsigned order)
-{
- struct page **caching_array;
- struct page *p;
- int r = 0;
- unsigned i, j, cpages;
- unsigned npages = 1 << order;
- unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
-
- /* allocate array for page caching change */
- caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
- GFP_KERNEL);
-
- if (!caching_array) {
- pr_debug("Unable to allocate table for new pages\n");
- return -ENOMEM;
- }
-
- for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_pages(gfp_flags, order);
-
- if (!p) {
- pr_debug("Unable to get page %u\n", i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(caching_array,
- cstate, cpages);
- if (r)
- ttm_handle_caching_failure(caching_array,
- cpages);
- }
- r = -ENOMEM;
- goto out;
- }
-
- list_add(&p->lru, pages);
-
-#ifdef CONFIG_HIGHMEM
- /* gfp flags of highmem page should never be dma32 so we
- * we should be fine in such case
- */
- if (PageHighMem(p))
- continue;
-
-#endif
- for (j = 0; j < npages; ++j) {
- caching_array[cpages++] = p++;
- if (cpages == max_cpages) {
-
- r = ttm_set_pages_caching(caching_array,
- cstate, cpages);
- if (r) {
- ttm_handle_caching_failure(caching_array,
- cpages);
- goto out;
- }
- cpages = 0;
- }
- }
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(caching_array, cstate, cpages);
- if (r)
- ttm_handle_caching_failure(caching_array, cpages);
- }
-out:
- kfree(caching_array);
-
- return r;
-}
-
-/**
- * Fill the given pool if there aren't enough pages and the requested number of
- * pages is small.
- */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
- enum ttm_caching cstate,
- unsigned count, unsigned long *irq_flags)
-{
- struct page *p;
- int r;
- unsigned cpages = 0;
- /**
- * Only allow one pool fill operation at a time.
- * If pool doesn't have enough pages for the allocation new pages are
- * allocated from outside of pool.
- */
- if (pool->fill_lock)
- return;
-
- pool->fill_lock = true;
-
- /* If allocation request is small and there are not enough
- * pages in a pool we fill the pool up first. */
- if (count < _manager->options.small
- && count > pool->npages) {
- struct list_head new_pages;
- unsigned alloc_size = _manager->options.alloc_size;
-
- /**
- * Can't change page caching if in irqsave context. We have to
- * drop the pool->lock.
- */
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
- INIT_LIST_HEAD(&new_pages);
- r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size, 0);
- spin_lock_irqsave(&pool->lock, *irq_flags);
-
- if (!r) {
- list_splice(&new_pages, &pool->list);
- ++pool->nrefills;
- pool->npages += alloc_size;
- } else {
- pr_debug("Failed to fill pool (%p)\n", pool);
- /* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &new_pages, lru) {
- ++cpages;
- }
- list_splice(&new_pages, &pool->list);
- pool->npages += cpages;
- }
-
- }
- pool->fill_lock = false;
-}
-
-/**
- * Allocate pages from the pool and put them on the return list.
- *
- * @return zero for success or negative error code.
- */
-static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching cstate,
- unsigned count, unsigned order)
-{
- unsigned long irq_flags;
- struct list_head *p;
- unsigned i;
- int r = 0;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- if (!order)
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
- &irq_flags);
-
- if (count >= pool->npages) {
- /* take all pages from the pool */
- list_splice_init(&pool->list, pages);
- count -= pool->npages;
- pool->npages = 0;
- goto out;
- }
- /* find the last pages to include for requested number of pages. Split
- * pool to begin and halve it to reduce search space. */
- if (count <= pool->npages/2) {
- i = 0;
- list_for_each(p, &pool->list) {
- if (++i == count)
- break;
- }
- } else {
- i = pool->npages + 1;
- list_for_each_prev(p, &pool->list) {
- if (--i == count)
- break;
- }
- }
- /* Cut 'count' number of pages from the pool */
- list_cut_position(pages, &pool->list, p);
- pool->npages -= count;
- count = 0;
-out:
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- /* clear the pages coming from the pool if requested */
- if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- struct page *page;
-
- list_for_each_entry(page, pages, lru) {
- if (PageHighMem(page))
- clear_highpage(page);
- else
- clear_page(page_address(page));
- }
- }
-
- /* If pool didn't have enough pages allocate new one. */
- if (count) {
- gfp_t gfp_flags = pool->gfp_flags;
-
- /* set zero flag for page allocation if required */
- if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_flags |= __GFP_RETRY_MAYFAIL;
-
- /* ttm_alloc_new_pages doesn't reference pool so we can run
- * multiple requests in parallel.
- **/
- r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
- count, order);
- }
-
- return r;
-}
-
-/* Put all pages in pages list to correct pool to wait for reuse */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching cstate)
-{
- struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
-#endif
- unsigned long irq_flags;
- unsigned i;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
- i = 0;
- while (i < npages) {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct page *p = pages[i];
-#endif
- unsigned order = 0, j;
-
- if (!pages[i]) {
- ++i;
- continue;
- }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (!(flags & TTM_PAGE_FLAG_DMA32) &&
- (npages - i) >= HPAGE_PMD_NR) {
- for (j = 1; j < HPAGE_PMD_NR; ++j)
- if (++p != pages[i + j])
- break;
-
- if (j == HPAGE_PMD_NR)
- order = HPAGE_PMD_ORDER;
- }
-#endif
-
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_pages(pages[i], order);
-
- j = 1 << order;
- while (j) {
- pages[i++] = NULL;
- --j;
- }
- }
- return;
- }
-
- i = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (huge) {
- unsigned max_size, n2free;
-
- spin_lock_irqsave(&huge->lock, irq_flags);
- while ((npages - i) >= HPAGE_PMD_NR) {
- struct page *p = pages[i];
- unsigned j;
-
- if (!p)
- break;
-
- for (j = 1; j < HPAGE_PMD_NR; ++j)
- if (++p != pages[i + j])
- break;
-
- if (j != HPAGE_PMD_NR)
- break;
-
- list_add_tail(&pages[i]->lru, &huge->list);
-
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = NULL;
- huge->npages++;
- }
-
- /* Check that we don't go over the pool limit */
- max_size = _manager->options.max_size;
- max_size /= HPAGE_PMD_NR;
- if (huge->npages > max_size)
- n2free = huge->npages - max_size;
- else
- n2free = 0;
- spin_unlock_irqrestore(&huge->lock, irq_flags);
- if (n2free)
- ttm_page_pool_free(huge, n2free, false);
- }
-#endif
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- while (i < npages) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- list_add_tail(&pages[i]->lru, &pool->list);
- pages[i] = NULL;
- pool->npages++;
- }
- ++i;
- }
- /* Check that we don't go over the pool limit */
- npages = 0;
- if (pool->npages > _manager->options.max_size) {
- npages = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (npages)
- ttm_page_pool_free(pool, npages, false);
-}
-
-/*
- * On success pages list will hold count number of correctly
- * cached pages.
- */
-static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching cstate)
-{
- struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
-#endif
- struct list_head plist;
- struct page *p = NULL;
- unsigned count, first;
- int r;
-
- /* No pool for cached pages */
- if (pool == NULL) {
- gfp_t gfp_flags = GFP_USER;
- unsigned i;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- unsigned j;
-#endif
-
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_flags |= __GFP_RETRY_MAYFAIL;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= GFP_DMA32;
- else
- gfp_flags |= GFP_HIGHUSER;
-
- i = 0;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (!(gfp_flags & GFP_DMA32)) {
- while (npages >= HPAGE_PMD_NR) {
- gfp_t huge_flags = gfp_flags;
-
- huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM;
- huge_flags &= ~__GFP_MOVABLE;
- huge_flags &= ~__GFP_COMP;
- p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
- if (!p)
- break;
-
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = p++;
-
- npages -= HPAGE_PMD_NR;
- }
- }
-#endif
-
- first = i;
- while (npages) {
- p = alloc_page(gfp_flags);
- if (!p) {
- pr_debug("Unable to allocate page\n");
- return -ENOMEM;
- }
-
- /* Swap the pages if we detect consecutive order */
- if (i > first && pages[i - 1] == p - 1)
- swap(p, pages[i - 1]);
-
- pages[i++] = p;
- --npages;
- }
- return 0;
- }
-
- count = 0;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (huge && npages >= HPAGE_PMD_NR) {
- INIT_LIST_HEAD(&plist);
- ttm_page_pool_get_pages(huge, &plist, flags, cstate,
- npages / HPAGE_PMD_NR,
- HPAGE_PMD_ORDER);
-
- list_for_each_entry(p, &plist, lru) {
- unsigned j;
-
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[count++] = &p[j];
- }
- }
-#endif
-
- INIT_LIST_HEAD(&plist);
- r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
- npages - count, 0);
-
- first = count;
- list_for_each_entry(p, &plist, lru) {
- struct page *tmp = p;
-
- /* Swap the pages if we detect consecutive order */
- if (count > first && pages[count - 1] == tmp - 1)
- swap(tmp, pages[count - 1]);
- pages[count++] = tmp;
- }
-
- if (r) {
- /* If there is any pages in the list put them back to
- * the pool.
- */
- pr_debug("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
- return r;
- }
-
- return 0;
-}
-
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
- char *name, unsigned int order)
-{
- spin_lock_init(&pool->lock);
- pool->fill_lock = false;
- INIT_LIST_HEAD(&pool->list);
- pool->npages = pool->nfrees = 0;
- pool->gfp_flags = flags;
- pool->name = name;
- pool->order = order;
-}
-
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- unsigned order = HPAGE_PMD_ORDER;
-#else
- unsigned order = 0;
-#endif
-
- WARN_ON(_manager);
-
- pr_info("Initializing pool allocator\n");
-
- _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- return -ENOMEM;
-
- ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
-
- ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
-
- ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
- GFP_USER | GFP_DMA32, "wc dma", 0);
-
- ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
- GFP_USER | GFP_DMA32, "uc dma", 0);
-
- ttm_page_pool_init_locked(&_manager->wc_pool_huge,
- (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM) &
- ~(__GFP_MOVABLE | __GFP_COMP),
- "wc huge", order);
-
- ttm_page_pool_init_locked(&_manager->uc_pool_huge,
- (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM) &
- ~(__GFP_MOVABLE | __GFP_COMP)
- , "uc huge", order);
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "pool");
- if (unlikely(ret != 0))
- goto error;
-
- ret = ttm_pool_mm_shrink_init(_manager);
- if (unlikely(ret != 0))
- goto error;
- return 0;
-
-error:
- kobject_put(&_manager->kobj);
- _manager = NULL;
- return ret;
-}
-
-void ttm_page_alloc_fini(void)
-{
- int i;
-
- pr_info("Finalizing pool allocator\n");
- ttm_pool_mm_shrink_fini(_manager);
-
- /* OK to use static buffer since global mutex is no longer used. */
- for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
-
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-static void
-ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- unsigned i;
-
- if (mem_count_update == 0)
- goto put_pages;
-
- for (i = 0; i < mem_count_update; ++i) {
- if (!ttm->pages[i])
- continue;
-
- ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
- }
-
-put_pages:
- ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching);
-}
-
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- unsigned i;
- int ret;
-
- if (ttm_tt_is_populated(ttm))
- return 0;
-
- if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
- return -ENOMEM;
-
- ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate_helper(ttm, 0);
- return ret;
- }
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- PAGE_SIZE, ctx);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate_helper(ttm, i);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_pool_populate);
-
-void ttm_pool_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
-}
-EXPORT_SYMBOL(ttm_pool_unpopulate);
-
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
- struct ttm_operation_ctx *ctx)
-{
- unsigned i, j;
- int r;
-
- r = ttm_pool_populate(tt, ctx);
- if (r)
- return r;
-
- for (i = 0; i < tt->num_pages; ++i) {
- struct page *p = tt->pages[i];
- size_t num_pages = 1;
-
- for (j = i + 1; j < tt->num_pages; ++j) {
- if (++p != tt->pages[j])
- break;
-
- ++num_pages;
- }
-
- tt->dma_address[i] = dma_map_page(dev, tt->pages[i],
- 0, num_pages * PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, tt->dma_address[i])) {
- while (i--) {
- dma_unmap_page(dev, tt->dma_address[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- tt->dma_address[i] = 0;
- }
- ttm_pool_unpopulate(tt);
- return -EFAULT;
- }
-
- for (j = 1; j < num_pages; ++j) {
- tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
- ++i;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_populate_and_map_pages);
-
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt)
-{
- unsigned i, j;
-
- for (i = 0; i < tt->num_pages;) {
- struct page *p = tt->pages[i];
- size_t num_pages = 1;
-
- if (!tt->dma_address[i] || !tt->pages[i]) {
- ++i;
- continue;
- }
-
- for (j = i + 1; j < tt->num_pages; ++j) {
- if (++p != tt->pages[j])
- break;
-
- ++num_pages;
- }
-
- dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- i += num_pages;
- }
- ttm_pool_unpopulate(tt);
-}
-EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct ttm_page_pool *p;
- unsigned i;
- char *h[] = {"pool", "refills", "pages freed", "size"};
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, "%7s %12s %13s %8s\n",
- h[0], h[1], h[2], h[3]);
- for (i = 0; i < NUM_POOLS; ++i) {
- p = &_manager->pools[i];
-
- seq_printf(m, "%7s %12ld %13ld %8d\n",
- p->name, p->nrefills,
- p->nfrees, p->npages);
- }
- return 0;
-}
-EXPORT_SYMBOL(ttm_page_alloc_debugfs);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
deleted file mode 100644
index c0353c25efd6..000000000000
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ /dev/null
@@ -1,1226 +0,0 @@
-/*
- * Copyright 2011 (c) Oracle Corp.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-/*
- * A simple DMA pool losely based on dmapool.c. It has certain advantages
- * over the DMA pools:
- * - Pool collects resently freed pages for reuse (and hooks up to
- * the shrinker).
- * - Tracks currently in use pages
- * - Tracks whether the page is UC, WB or cached (and reverts to WB
- * when freed).
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
-
-#include "ttm_set_memory.h"
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 4
-#define FREE_ALL_PAGES (~0U)
-#define VADDR_FLAG_HUGE_POOL 1UL
-#define VADDR_FLAG_UPDATED_COUNT 2UL
-
-enum pool_type {
- IS_UNDEFINED = 0,
- IS_WC = 1 << 1,
- IS_UC = 1 << 2,
- IS_CACHED = 1 << 3,
- IS_DMA32 = 1 << 4,
- IS_HUGE = 1 << 5
-};
-
-/*
- * The pool structure. There are up to nine pools:
- * - generic (not restricted to DMA32):
- * - write combined, uncached, cached.
- * - dma32 (up to 2^32 - so up 4GB):
- * - write combined, uncached, cached.
- * - huge (not restricted to DMA32):
- * - write combined, uncached, cached.
- * for each 'struct device'. The 'cached' is for pages that are actively used.
- * The other ones can be shrunk by the shrinker API if neccessary.
- * @pools: The 'struct device->dma_pools' link.
- * @type: Type of the pool
- * @lock: Protects the free_list from concurrnet access. Must be
- * used with irqsave/irqrestore variants because pool allocator maybe called
- * from delayed work.
- * @free_list: Pool of pages that are free to be used. No order requirements.
- * @dev: The device that is associated with these pools.
- * @size: Size used during DMA allocation.
- * @npages_free: Count of available pages for re-use.
- * @npages_in_use: Count of pages that are in use.
- * @nfrees: Stats when pool is shrinking.
- * @nrefills: Stats when the pool is grown.
- * @gfp_flags: Flags to pass for alloc_page.
- * @name: Name of the pool.
- * @dev_name: Name derieved from dev - similar to how dev_info works.
- * Used during shutdown as the dev_info during release is unavailable.
- */
-struct dma_pool {
- struct list_head pools; /* The 'struct device->dma_pools link */
- enum pool_type type;
- spinlock_t lock;
- struct list_head free_list;
- struct device *dev;
- unsigned size;
- unsigned npages_free;
- unsigned npages_in_use;
- unsigned long nfrees; /* Stats when shrunk. */
- unsigned long nrefills; /* Stats when grown. */
- gfp_t gfp_flags;
- char name[13]; /* "cached dma32" */
- char dev_name[64]; /* Constructed from dev */
-};
-
-/*
- * The accounting page keeping track of the allocated page along with
- * the DMA address.
- * @page_list: The link to the 'page_list' in 'struct dma_pool'.
- * @vaddr: The virtual address of the page and a flag if the page belongs to a
- * huge pool
- * @dma: The bus address of the page. If the page is not allocated
- * via the DMA API, it will be -1.
- */
-struct dma_page {
- struct list_head page_list;
- unsigned long vaddr;
- struct page *p;
- dma_addr_t dma;
-};
-
-/*
- * Limits for the pool. They are handled without locks because the only place
- * where they may change is the sysfs store. They won't have an immediate effect
- * anyway, so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-/*
- * Contains the list of all of the 'struct device' and their corresponding
- * DMA pools. Guarded by _manager->lock.
- * @pools: The link to 'struct ttm_pool_manager->pools'
- * @dev: The 'struct device' associated with the 'pool'
- * @pool: The 'struct dma_pool' associated with the 'dev'
- */
-struct device_pools {
- struct list_head pools;
- struct device *dev;
- struct dma_pool *pool;
-};
-
-/*
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * @lock: Lock used when adding/removing from pools
- * @pools: List of 'struct device' and 'struct dma_pool' tuples.
- * @options: Limits for the pool.
- * @npools: Total amount of pools in existence.
- * @mm_shrink: The structure used by [un]register_shrinker
- */
-struct ttm_pool_manager {
- struct mutex lock;
- struct list_head pools;
- struct ttm_pool_opts options;
- unsigned npools;
- struct shrinker mm_shrink;
- struct kobject kobj;
-};
-
-static struct ttm_pool_manager *_manager;
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
-
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max) {
- m->options.max_size = val;
- } else if (attr == &ttm_page_pool_small) {
- m->options.small = val;
- } else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-static int ttm_set_pages_caching(struct dma_pool *pool,
- struct page **pages, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- if (pool->type & IS_UC) {
- r = ttm_set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to uc!\n",
- pool->dev_name, cpages);
- }
- if (pool->type & IS_WC) {
- r = ttm_set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to wc!\n",
- pool->dev_name, cpages);
- }
- return r;
-}
-
-static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
-{
- unsigned long attrs = 0;
- dma_addr_t dma = d_page->dma;
- d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
- if (pool->type & IS_HUGE)
- attrs = DMA_ATTR_NO_WARN;
-
- dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
-
- kfree(d_page);
- d_page = NULL;
-}
-static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
-{
- struct dma_page *d_page;
- unsigned long attrs = 0;
- void *vaddr;
-
- d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
- if (!d_page)
- return NULL;
-
- if (pool->type & IS_HUGE)
- attrs = DMA_ATTR_NO_WARN;
-
- vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
- pool->gfp_flags, attrs);
- if (vaddr) {
- if (is_vmalloc_addr(vaddr))
- d_page->p = vmalloc_to_page(vaddr);
- else
- d_page->p = virt_to_page(vaddr);
- d_page->vaddr = (unsigned long)vaddr;
- if (pool->type & IS_HUGE)
- d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
- } else {
- kfree(d_page);
- d_page = NULL;
- }
- return d_page;
-}
-static enum pool_type ttm_to_type(int flags, enum ttm_caching cstate)
-{
- enum pool_type type = IS_UNDEFINED;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- type |= IS_DMA32;
- if (cstate == ttm_cached)
- type |= IS_CACHED;
- else if (cstate == ttm_uncached)
- type |= IS_UC;
- else
- type |= IS_WC;
-
- return type;
-}
-
-static void ttm_pool_update_free_locked(struct dma_pool *pool,
- unsigned freed_pages)
-{
- pool->npages_free -= freed_pages;
- pool->nfrees += freed_pages;
-
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
- struct page *page = d_page->p;
- unsigned num_pages;
-
- /* Don't set WB on WB page pool. */
- if (!(pool->type & IS_CACHED)) {
- num_pages = pool->size / PAGE_SIZE;
- if (ttm_set_pages_wb(page, num_pages))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, num_pages);
- }
-
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
-}
-
-static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
- struct page *pages[], unsigned npages)
-{
- struct dma_page *d_page, *tmp;
-
- if (pool->type & IS_HUGE) {
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
- ttm_dma_page_put(pool, d_page);
-
- return;
- }
-
- /* Don't set WB on WB page pool. */
- if (npages && !(pool->type & IS_CACHED) &&
- ttm_set_pages_array_wb(pages, npages))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, npages);
-
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- }
-}
-
-/*
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
- * number of pages in one go.
- *
- * @pool: to free the pages from
- * @nr_free: Number of pages to free; FREE_ALL_PAGES frees all pages in the pool
- * @use_static: Safe to use static buffer
- **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
- bool use_static)
-{
- static struct page *static_buf[NUM_PAGES_TO_ALLOC];
- unsigned long irq_flags;
- struct dma_page *dma_p, *tmp;
- struct page **pages_to_free;
- struct list_head d_pages;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- if (use_static)
- pages_to_free = static_buf;
- else
- pages_to_free = kmalloc_array(npages_to_free,
- sizeof(struct page *),
- GFP_KERNEL);
-
- if (!pages_to_free) {
- pr_debug("%s: Failed to allocate memory for pool free operation\n",
- pool->dev_name);
- return 0;
- }
- INIT_LIST_HEAD(&d_pages);
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
- /* We pick the oldest ones off the list */
- list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
- page_list) {
- if (freed_pages >= npages_to_free)
- break;
-
- /* Move the dma_page from one list to another. */
- list_move(&dma_p->page_list, &d_pages);
-
- pages_to_free[freed_pages++] = dma_p->p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-
- ttm_pool_update_free_locked(pool, freed_pages);
- /**
- * Because changing page caching is costly
- * we unlock the pool to prevent stalling.
- */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_dma_pages_put(pool, &d_pages, pages_to_free,
- freed_pages);
-
- INIT_LIST_HEAD(&d_pages);
-
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
- /* free all so restart the processing */
- if (nr_free)
- goto restart;
-
- /* Not allowed to fall through or break because the
- * code that follows runs under the spinlock, while we are
- * outside it here.
- */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
-out:
- if (pages_to_free != static_buf)
- kfree(pages_to_free);
- return nr_free;
-}
-
-static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
-{
- struct device_pools *p;
- struct dma_pool *pool;
-
- if (!dev)
- return;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry_reverse(p, &_manager->pools, pools) {
- if (p->dev != dev)
- continue;
- pool = p->pool;
- if (pool->type != type)
- continue;
-
- list_del(&p->pools);
- kfree(p);
- _manager->npools--;
- break;
- }
- list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
- /* Takes a spinlock.. */
- /* OK to use static buffer since global mutex is held. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
- WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
- /* This code path is called after _all_ references to the
- * struct device have been dropped - so nobody should be
- * touching it. In case somebody is trying to _add_ we are
- * guarded by the mutex. */
- list_del(&pool->pools);
- kfree(pool);
- break;
- }
- mutex_unlock(&_manager->lock);
-}
-
-/*
- * When the 'struct device' is freed, this destructor is run,
- * although the pool might have already been freed earlier.
- */
-static void ttm_dma_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- if (pool)
- ttm_dma_free_pool(dev, pool->type);
-}
-
-static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
- enum pool_type type)
-{
- const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
- enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
- struct device_pools *sec_pool = NULL;
- struct dma_pool *pool = NULL, **ptr;
- unsigned i;
- int ret = -ENODEV;
- char *p;
-
- if (!dev)
- return NULL;
-
- ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- ret = -ENOMEM;
-
- pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
- dev_to_node(dev));
- if (!pool)
- goto err_mem;
-
- sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
- dev_to_node(dev));
- if (!sec_pool)
- goto err_mem;
-
- INIT_LIST_HEAD(&sec_pool->pools);
- sec_pool->dev = dev;
- sec_pool->pool = pool;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->pools);
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->npages_free = pool->npages_in_use = 0;
- pool->nfrees = 0;
- pool->gfp_flags = flags;
- if (type & IS_HUGE)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pool->size = HPAGE_PMD_SIZE;
-#else
- BUG();
-#endif
- else
- pool->size = PAGE_SIZE;
- pool->type = type;
- pool->nrefills = 0;
- p = pool->name;
- for (i = 0; i < ARRAY_SIZE(t); i++) {
- if (type & t[i]) {
- p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
- "%s", n[i]);
- }
- }
- *p = 0;
- /* We copy the name for pr_ calls because when dma_pool_destroy is called
- * the kobj->name has already been deallocated. */
- snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
- dev_driver_string(dev), dev_name(dev));
- mutex_lock(&_manager->lock);
- /* You can get the dma_pool from either the global: */
- list_add(&sec_pool->pools, &_manager->pools);
- _manager->npools++;
- /* or from 'struct device': */
- list_add(&pool->pools, &dev->dma_pools);
- mutex_unlock(&_manager->lock);
-
- *ptr = pool;
- devres_add(dev, ptr);
-
- return pool;
-err_mem:
- devres_free(ptr);
- kfree(sec_pool);
- kfree(pool);
- return ERR_PTR(ret);
-}
-
-static struct dma_pool *ttm_dma_find_pool(struct device *dev,
- enum pool_type type)
-{
- struct dma_pool *pool, *tmp;
-
- if (type == IS_UNDEFINED)
- return NULL;
-
- /* NB: We iterate on the 'struct device' which has no spinlock, but
- * it does have a kref which we have taken. The kref is taken during
- * graphic driver loading - in the drm_pci_init it calls either
- * pci_dev_get or pci_register_driver which both end up taking a kref
- * on 'struct device'.
- *
- * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
- * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
- * thing is at that point of time there are no pages associated with the
- * driver so this function will not be called.
- */
- list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
- if (pool->type == type)
- return pool;
- return NULL;
-}
-
-/*
- * Free the pages that failed to change their caching state. If there
- * are pages that have already changed their caching state, put them back
- * into the pool.
- */
-static void ttm_dma_handle_caching_failure(struct dma_pool *pool,
- struct list_head *d_pages,
- struct page **failed_pages,
- unsigned cpages)
-{
- struct dma_page *d_page, *tmp;
- struct page *p;
- unsigned i = 0;
-
- p = failed_pages[0];
- if (!p)
- return;
- /* Find the failed page. */
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- if (d_page->p != p)
- continue;
- /* .. and then progress over the full list. */
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- if (++i < cpages)
- p = failed_pages[i];
- else
- break;
- }
-
-}
-
-/*
- * Allocate 'count' pages, set the requested caching state on them and
- * link them into the 'd_pages' list.
- * We return zero for success, and negative numbers as errors.
- */
-static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
- struct list_head *d_pages,
- unsigned count)
-{
- struct page **caching_array;
- struct dma_page *dma_p;
- struct page *p;
- int r = 0;
- unsigned i, j, npages, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
-
- /* allocate array for page caching change */
- caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
- GFP_KERNEL);
-
- if (!caching_array) {
- pr_debug("%s: Unable to allocate table for new pages\n",
- pool->dev_name);
- return -ENOMEM;
- }
-
- if (count > 1)
- pr_debug("%s: (%s:%d) Getting %d pages\n",
- pool->dev_name, pool->name, current->pid, count);
-
- for (i = 0, cpages = 0; i < count; ++i) {
- dma_p = __ttm_dma_alloc_page(pool);
- if (!dma_p) {
- pr_debug("%s: Unable to get page %u\n",
- pool->dev_name, i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r)
- ttm_dma_handle_caching_failure(
- pool, d_pages, caching_array,
- cpages);
- }
- r = -ENOMEM;
- goto out;
- }
- p = dma_p->p;
- list_add(&dma_p->page_list, d_pages);
-
-#ifdef CONFIG_HIGHMEM
- /* gfp flags of a highmem page should never be dma32, so
- * we should be fine in such a case
- */
- if (PageHighMem(p))
- continue;
-#endif
-
- npages = pool->size / PAGE_SIZE;
- for (j = 0; j < npages; ++j) {
- caching_array[cpages++] = p + j;
- if (cpages == max_cpages) {
- /* Note: Cannot hold the spinlock */
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r) {
- ttm_dma_handle_caching_failure(
- pool, d_pages, caching_array,
- cpages);
- goto out;
- }
- cpages = 0;
- }
- }
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array, cpages);
- if (r)
- ttm_dma_handle_caching_failure(pool, d_pages,
- caching_array, cpages);
- }
-out:
- kfree(caching_array);
- return r;
-}
-
-/*
- * @return count of pages still required to fulfill the request.
- */
-static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
- unsigned long *irq_flags)
-{
- unsigned count = _manager->options.small;
- int r = pool->npages_free;
-
- if (count > pool->npages_free) {
- struct list_head d_pages;
-
- INIT_LIST_HEAD(&d_pages);
-
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
- /* Returns how many more are necessary to fulfill the
- * request. */
- r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
-
- spin_lock_irqsave(&pool->lock, *irq_flags);
- if (!r) {
- /* Add the fresh to the end.. */
- list_splice(&d_pages, &pool->free_list);
- ++pool->nrefills;
- pool->npages_free += count;
- r = count;
- } else {
- struct dma_page *d_page;
- unsigned cpages = 0;
-
- pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
-
- list_for_each_entry(d_page, &d_pages, page_list) {
- cpages++;
- }
- list_splice_tail(&d_pages, &pool->free_list);
- pool->npages_free += cpages;
- r = cpages;
- }
- }
- return r;
-}
-
-/*
- * The populate list is actually a stack (not that it matters, as TTM
- * allocates one page at a time).
- * Return a dma_page pointer on success, otherwise NULL.
- */
-static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_tt *ttm,
- unsigned index)
-{
- struct dma_page *d_page = NULL;
- unsigned long irq_flags;
- int count;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
- if (count) {
- d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
- ttm->pages[index] = d_page->p;
- ttm->dma_address[index] = d_page->dma;
- list_move_tail(&d_page->page_list, &ttm->pages_list);
- pool->npages_in_use += 1;
- pool->npages_free -= 1;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- return d_page;
-}
-
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge)
-{
- gfp_t gfp_flags;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (huge) {
- gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
- __GFP_KSWAPD_RECLAIM;
- gfp_flags &= ~__GFP_MOVABLE;
- gfp_flags &= ~__GFP_COMP;
- }
-
- if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_flags |= __GFP_RETRY_MAYFAIL;
-
- return gfp_flags;
-}
-
-/*
- * On success the pages list will hold 'count' correctly
- * cached pages. On failure the negative error code (-ENOMEM, etc.) is returned.
- */
-int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev,
- struct ttm_operation_ctx *ctx)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- unsigned long num_pages = ttm->num_pages;
- struct dma_pool *pool;
- struct dma_page *d_page;
- enum pool_type type;
- unsigned i;
- int ret;
-
- if (ttm_tt_is_populated(ttm))
- return 0;
-
- if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
- return -ENOMEM;
-
- INIT_LIST_HEAD(&ttm->pages_list);
- i = 0;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- goto skip_huge;
-
- pool = ttm_dma_find_pool(dev, type | IS_HUGE);
- if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true);
-
- pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
- if (IS_ERR_OR_NULL(pool))
- goto skip_huge;
- }
-
- while (num_pages >= HPAGE_PMD_NR) {
- unsigned j;
-
- d_page = ttm_dma_pool_get_pages(pool, ttm, i);
- if (!d_page)
- break;
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size, ctx);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm, dev);
- return -ENOMEM;
- }
-
- d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
- for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
- ttm->pages[j] = ttm->pages[j - 1] + 1;
- ttm->dma_address[j] = ttm->dma_address[j - 1] +
- PAGE_SIZE;
- }
-
- i += HPAGE_PMD_NR;
- num_pages -= HPAGE_PMD_NR;
- }
-
-skip_huge:
-#endif
-
- pool = ttm_dma_find_pool(dev, type);
- if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false);
-
- pool = ttm_dma_pool_init(dev, gfp_flags, type);
- if (IS_ERR_OR_NULL(pool))
- return -ENOMEM;
- }
-
- while (num_pages) {
- d_page = ttm_dma_pool_get_pages(pool, ttm, i);
- if (!d_page) {
- ttm_dma_unpopulate(ttm, dev);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size, ctx);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm, dev);
- return -ENOMEM;
- }
-
- d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
- ++i;
- --num_pages;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_populate);
-
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
-{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct dma_pool *pool;
- struct dma_page *d_page, *next;
- enum pool_type type;
- bool is_cached = false;
- unsigned count, i, npages = 0;
- unsigned long irq_flags;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pool = ttm_dma_find_pool(dev, type | IS_HUGE);
- if (pool) {
- count = 0;
- list_for_each_entry_safe(d_page, next, &ttm->pages_list,
- page_list) {
- if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
- continue;
-
- count++;
- if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
- ttm_mem_global_free_page(mem_glob, d_page->p,
- pool->size);
- d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
- }
- ttm_dma_page_put(pool, d_page);
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- pool->npages_in_use -= count;
- pool->nfrees += count;
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- }
-#endif
-
- pool = ttm_dma_find_pool(dev, type);
- if (!pool)
- return;
-
- is_cached = (ttm_dma_find_pool(pool->dev,
- ttm_to_type(ttm->page_flags, ttm_cached)) == pool);
-
- /* make sure pages array match list and count number of pages */
- count = 0;
- list_for_each_entry_safe(d_page, next, &ttm->pages_list,
- page_list) {
- ttm->pages[count] = d_page->p;
- count++;
-
- if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
- ttm_mem_global_free_page(mem_glob, d_page->p,
- pool->size);
- d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
- }
-
- if (is_cached)
- ttm_dma_page_put(pool, d_page);
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- pool->npages_in_use -= count;
- if (is_cached) {
- pool->nfrees += count;
- } else {
- pool->npages_free += count;
- list_splice(&ttm->pages_list, &pool->free_list);
- /*
- * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
- * to free in order to minimize calls to set_memory_wb().
- */
- if (pool->npages_free >= (_manager->options.max_size +
- NUM_PAGES_TO_ALLOC))
- npages = pool->npages_free - _manager->options.max_size;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- INIT_LIST_HEAD(&ttm->pages_list);
- for (i = 0; i < ttm->num_pages; i++) {
- ttm->pages[i] = NULL;
- ttm->dma_address[i] = 0;
- }
-
- /* shrink pool if necessary (only on !is_cached pools)*/
- if (npages)
- ttm_dma_page_pool_free(pool, npages, false);
-}
-EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
-
-/**
- * Callback for mm to request the pool to reduce the number of pages held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
- * shrinkers
- */
-static unsigned long
-ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- static unsigned start_pool;
- unsigned idx = 0;
- unsigned pool_offset;
- unsigned shrink_pages = sc->nr_to_scan;
- struct device_pools *p;
- unsigned long freed = 0;
-
- if (list_empty(&_manager->pools))
- return SHRINK_STOP;
-
- if (!mutex_trylock(&_manager->lock))
- return SHRINK_STOP;
- if (!_manager->npools)
- goto out;
- pool_offset = ++start_pool % _manager->npools;
- list_for_each_entry(p, &_manager->pools, pools) {
- unsigned nr_free;
-
- if (!p->dev)
- continue;
- if (shrink_pages == 0)
- break;
- /* Do it in round-robin fashion. */
- if (++idx < pool_offset)
- continue;
- nr_free = shrink_pages;
- /* OK to use static buffer since global mutex is held. */
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
- freed += nr_free - shrink_pages;
-
- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
- p->pool->dev_name, p->pool->name, current->pid,
- nr_free, shrink_pages);
- }
-out:
- mutex_unlock(&_manager->lock);
- return freed;
-}
-
-static unsigned long
-ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct device_pools *p;
- unsigned long count = 0;
-
- if (!mutex_trylock(&_manager->lock))
- return 0;
- list_for_each_entry(p, &_manager->pools, pools)
- count += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return count;
-}
-
-static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
- manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
- manager->mm_shrink.seeks = 1;
- return register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret;
-
- WARN_ON(_manager);
-
- pr_info("Initializing DMA pool allocator\n");
-
- _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- return -ENOMEM;
-
- mutex_init(&_manager->lock);
- INIT_LIST_HEAD(&_manager->pools);
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- /* This takes care of auto-freeing the _manager */
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "dma_pool");
- if (unlikely(ret != 0))
- goto error;
-
- ret = ttm_dma_pool_mm_shrink_init(_manager);
- if (unlikely(ret != 0))
- goto error;
- return 0;
-
-error:
- kobject_put(&_manager->kobj);
- _manager = NULL;
- return ret;
-}
-
-void ttm_dma_page_alloc_fini(void)
-{
- struct device_pools *p, *t;
-
- pr_info("Finalizing DMA pool allocator\n");
- ttm_dma_pool_mm_shrink_fini(_manager);
-
- list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
- dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
- current->pid);
- WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
- ttm_dma_pool_match, p->pool));
- ttm_dma_free_pool(p->dev, p->pool->type);
- }
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct device_pools *p;
- struct dma_pool *pool = NULL;
-
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, " pool refills pages freed inuse available name\n");
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools) {
- struct device *dev = p->dev;
- if (!dev)
- continue;
- pool = p->pool;
- seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
- pool->name, pool->nrefills,
- pool->nfrees, pool->npages_in_use,
- pool->npages_free,
- pool->dev_name);
- }
- mutex_unlock(&_manager->lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
new file mode 100644
index 000000000000..44ec41aa78d6
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+/* Pooling of allocated pages is necessary because changing the caching
+ * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
+ * invalidation for those addresses.
+ *
+ * In addition to that, allocations from the DMA coherent API are pooled as
+ * well because they are rather slow compared to alloc_pages+map.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include <drm/ttm/ttm_pool.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
+
+/**
+ * struct ttm_pool_dma - Helper object for coherent DMA mappings
+ *
+ * @addr: original DMA address returned for the mapping
+ * @vaddr: original vaddr returned for the mapping, with the order stored in the lower bits
+ */
+struct ttm_pool_dma {
+ dma_addr_t addr;
+ unsigned long vaddr;
+};
+
+static unsigned long page_pool_size;
+
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_size, ulong, 0644);
+
+static atomic_long_t allocated_pages;
+
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
+
+static spinlock_t shrinker_lock;
+static struct list_head shrinker_list;
+static struct shrinker mm_shrinker;
+
+/* Allocate pages of size 1 << order with the given gfp_flags */
+static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
+ unsigned int order)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ struct page *p;
+ void *vaddr;
+
+ if (order) {
+ gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM;
+ gfp_flags &= ~__GFP_MOVABLE;
+ gfp_flags &= ~__GFP_COMP;
+ }
+
+ if (!pool->use_dma_alloc) {
+ p = alloc_pages(gfp_flags, order);
+ if (p)
+ p->private = order;
+ return p;
+ }
+
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+ &dma->addr, gfp_flags, attr);
+ if (!vaddr)
+ goto error_free;
+
+ /* TODO: This is an illegal abuse of the DMA API, but we need to rework
+ * TTM page fault handling and extend the DMA API to clean this up.
+ */
+ if (is_vmalloc_addr(vaddr))
+ p = vmalloc_to_page(vaddr);
+ else
+ p = virt_to_page(vaddr);
+
+ dma->vaddr = (unsigned long)vaddr | order;
+ p->private = (unsigned long)dma;
+ return p;
+
+error_free:
+ kfree(dma);
+ return NULL;
+}
+
+/* Reset the caching and pages of size 1 << order */
+static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
+ unsigned int order, struct page *p)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ void *vaddr;
+
+#ifdef CONFIG_X86
+ /* We don't care that set_pages_wb is inefficient here. This is only
+ * used when we have to shrink and CPU overhead is irrelevant then.
+ */
+ if (caching != ttm_cached && !PageHighMem(p))
+ set_pages_wb(p, 1 << order);
+#endif
+
+ if (!pool->use_dma_alloc) {
+ __free_pages(p, order);
+ return;
+ }
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ dma = (void *)p->private;
+ vaddr = (void *)(dma->vaddr & PAGE_MASK);
+ dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
+ attr);
+ kfree(dma);
+}
+
+/* Apply a new caching to an array of pages */
+static int ttm_pool_apply_caching(struct page **first, struct page **last,
+ enum ttm_caching caching)
+{
+#ifdef CONFIG_X86
+ unsigned int num_pages = last - first;
+
+ if (!num_pages)
+ return 0;
+
+ switch (caching) {
+ case ttm_cached:
+ break;
+ case ttm_write_combined:
+ return set_pages_array_wc(first, num_pages);
+ case ttm_uncached:
+ return set_pages_array_uc(first, num_pages);
+ }
+#endif
+ return 0;
+}
+
+/* Map pages of 1 << order size and fill the DMA address array */
+static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr)
+{
+ dma_addr_t addr;
+ unsigned int i;
+
+ if (pool->use_dma_alloc) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ addr = dma->addr;
+ } else {
+ size_t size = (1ULL << order) * PAGE_SIZE;
+
+ addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pool->dev, addr))
+ return -EFAULT;
+ }
+
+ for (i = 1 << order; i ; --i) {
+ *(*dma_addr)++ = addr;
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/* Unmap pages of 1 << order size */
+static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
+ unsigned int num_pages)
+{
+ /* Unmapped while freeing the page */
+ if (pool->use_dma_alloc)
+ return;
+
+ dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
+ DMA_BIDIRECTIONAL);
+}
+
+/* Give pages into a specific pool_type */
+static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+{
+ spin_lock(&pt->lock);
+ list_add(&p->lru, &pt->pages);
+ spin_unlock(&pt->lock);
+ atomic_long_add(1 << pt->order, &allocated_pages);
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+{
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
+ if (p) {
+ atomic_long_sub(1 << pt->order, &allocated_pages);
+ list_del(&p->lru);
+ }
+ spin_unlock(&pt->lock);
+
+ return p;
+}
+
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+ unsigned int count = 0;
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ /* Only used for debugfs, the overhead doesn't matter */
+ list_for_each_entry(p, &pt->pages, lru)
+ ++count;
+ spin_unlock(&pt->lock);
+
+ return count;
+}
+
+/* Initialize and add a pool type to the global shrinker list */
+static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+ enum ttm_caching caching, unsigned int order)
+{
+ pt->pool = pool;
+ pt->caching = caching;
+ pt->order = order;
+ spin_lock_init(&pt->lock);
+ INIT_LIST_HEAD(&pt->pages);
+
+ spin_lock(&shrinker_lock);
+ list_add_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+}
+
+/* Remove a pool_type from the global shrinker list and free all pages */
+static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+{
+ struct page *p, *tmp;
+
+ spin_lock(&shrinker_lock);
+ list_del(&pt->shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ list_for_each_entry_safe(p, tmp, &pt->pages, lru)
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+}
+
+/* Return the pool_type to use for the given caching and order */
+static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+ enum ttm_caching caching,
+ unsigned int order)
+{
+ if (pool->use_dma_alloc)
+ return &pool->caching[caching].orders[order];
+
+#ifdef CONFIG_X86
+ switch (caching) {
+ case ttm_write_combined:
+ return &global_write_combined[order];
+ case ttm_uncached:
+ return &global_uncached[order];
+ default:
+ break;
+ }
+#endif
+
+ return NULL;
+}
+
+/* Free pages using the global shrinker list */
+static unsigned int ttm_pool_shrink(void)
+{
+ struct ttm_pool_type *pt;
+ unsigned int num_freed;
+ struct page *p;
+
+ spin_lock(&shrinker_lock);
+ pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+
+ p = ttm_pool_type_take(pt);
+ if (p) {
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+ num_freed = 1 << pt->order;
+ } else {
+ num_freed = 0;
+ }
+
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ return num_freed;
+}
+
+/* Return the allocation order of a given page */
+static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+{
+ if (pool->use_dma_alloc) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ return dma->vaddr & ~PAGE_MASK;
+ }
+
+ return p->private;
+}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ unsigned long num_pages = tt->num_pages;
+ dma_addr_t *dma_addr = tt->dma_address;
+ struct page **caching = tt->pages;
+ struct page **pages = tt->pages;
+ gfp_t gfp_flags = GFP_USER;
+ unsigned int i, order;
+ struct page *p;
+ int r;
+
+ WARN_ON(!num_pages || ttm_tt_is_populated(tt));
+ WARN_ON(dma_addr && !pool->dev);
+
+ if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (ctx->gfp_retry_mayfail)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
+ if (pool->use_dma32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
+ for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+ order = min_t(unsigned int, order, __fls(num_pages))) {
+ bool apply_caching = false;
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, tt->caching, order);
+ p = pt ? ttm_pool_type_take(pt) : NULL;
+ if (p) {
+ apply_caching = true;
+ } else {
+ p = ttm_pool_alloc_page(pool, gfp_flags, order);
+ if (p && PageHighMem(p))
+ apply_caching = true;
+ }
+
+ if (!p) {
+ if (order) {
+ --order;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
+
+ if (apply_caching) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+ caching = pages + (1 << order);
+ }
+
+ r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
+ (1 << order) * PAGE_SIZE,
+ ctx);
+ if (r)
+ goto error_free_page;
+
+ if (dma_addr) {
+ r = ttm_pool_map(pool, order, p, &dma_addr);
+ if (r)
+ goto error_global_free;
+ }
+
+ num_pages -= 1 << order;
+ for (i = 1 << order; i; --i)
+ *(pages++) = p++;
+ }
+
+ r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ if (r)
+ goto error_free_all;
+
+ return 0;
+
+error_global_free:
+ ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
+
+error_free_page:
+ ttm_pool_free_page(pool, tt->caching, order, p);
+
+error_free_all:
+ num_pages = tt->num_pages - num_pages;
+ for (i = 0; i < num_pages; ) {
+ order = ttm_pool_page_order(pool, tt->pages[i]);
+ ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+ i += 1 << order;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(ttm_pool_alloc);
+
+/**
+ * ttm_pool_free - Free the backing pages from a ttm_tt object
+ *
+ * @pool: Pool to give pages back to.
+ * @tt: ttm_tt object to unpopulate
+ *
+ * Give the backing pages back to a pool or free them
+ */
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+{
+ unsigned int i;
+
+ for (i = 0; i < tt->num_pages; ) {
+ struct page *p = tt->pages[i];
+ unsigned int order, num_pages;
+ struct ttm_pool_type *pt;
+
+ order = ttm_pool_page_order(pool, p);
+ num_pages = 1ULL << order;
+ ttm_mem_global_free_page(&ttm_mem_glob, p,
+ num_pages * PAGE_SIZE);
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+
+ pt = ttm_pool_select_type(pool, tt->caching, order);
+ if (pt)
+ ttm_pool_type_give(pt, tt->pages[i]);
+ else
+ ttm_pool_free_page(pool, tt->caching, order,
+ tt->pages[i]);
+
+ i += num_pages;
+ }
+
+ while (atomic_long_read(&allocated_pages) > page_pool_size)
+ ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
+
+/**
+ * ttm_pool_init - Initialize a pool
+ *
+ * @pool: the pool to initialize
+ * @dev: device for DMA allocations and mappings
+ * @use_dma_alloc: true if coherent DMA alloc should be used
+ * @use_dma32: true if GFP_DMA32 should be used
+ *
+ * Initialize the pool and its pool types.
+ */
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ bool use_dma_alloc, bool use_dma32)
+{
+ unsigned int i, j;
+
+ WARN_ON(!dev && use_dma_alloc);
+
+ pool->dev = dev;
+ pool->use_dma_alloc = use_dma_alloc;
+ pool->use_dma32 = use_dma32;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_init(&pool->caching[i].orders[j],
+ pool, i, j);
+}
+EXPORT_SYMBOL(ttm_pool_init);
+
+/**
+ * ttm_pool_fini - Cleanup a pool
+ *
+ * @pool: the pool to clean up
+ *
+ * Free all pages in the pool and unregister the types from the global
+ * shrinker.
+ */
+void ttm_pool_fini(struct ttm_pool *pool)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_fini(&pool->caching[i].orders[j]);
+}
+EXPORT_SYMBOL(ttm_pool_fini);
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Dump information about the different pool types */
+static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
+ struct seq_file *m)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
+ seq_puts(m, "\n");
+}
+
+/**
+ * ttm_pool_debugfs - Debugfs dump function for a pool
+ *
+ * @pool: the pool to dump the information for
+ * @m: seq_file to dump to
+ *
+ * Make a debugfs dump with the per pool and global information.
+ */
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
+{
+ unsigned int i;
+
+ spin_lock(&shrinker_lock);
+
+ seq_puts(m, "\t ");
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " ---%2u---", i);
+ seq_puts(m, "\n");
+
+ seq_puts(m, "wc\t:");
+ ttm_pool_debugfs_orders(global_write_combined, m);
+ seq_puts(m, "uc\t:");
+ ttm_pool_debugfs_orders(global_uncached, m);
+
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ seq_puts(m, "DMA ");
+ switch (i) {
+ case ttm_cached:
+ seq_puts(m, "\t:");
+ break;
+ case ttm_write_combined:
+ seq_puts(m, "wc\t:");
+ break;
+ case ttm_uncached:
+ seq_puts(m, "uc\t:");
+ break;
+ }
+ ttm_pool_debugfs_orders(pool->caching[i].orders, m);
+ }
+
+ seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
+ atomic_long_read(&allocated_pages), page_pool_size);
+
+ spin_unlock(&shrinker_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_debugfs);
+
+#endif
+
+/* As long as pages are available make sure to release at least one */
+static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_freed = 0;
+
+ do
+ num_freed += ttm_pool_shrink();
+ while (!num_freed && atomic_long_read(&allocated_pages));
+
+ return num_freed;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_pages = atomic_long_read(&allocated_pages);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+/**
+ * ttm_pool_mgr_init - Initialize globals
+ *
+ * @num_pages: default number of pages
+ *
+ * Initialize the global locks and lists for the MM shrinker.
+ */
+int ttm_pool_mgr_init(unsigned long num_pages)
+{
+ unsigned int i;
+
+ if (!page_pool_size)
+ page_pool_size = num_pages;
+
+ spin_lock_init(&shrinker_lock);
+ INIT_LIST_HEAD(&shrinker_list);
+
+ for (i = 0; i < MAX_ORDER; ++i) {
+ ttm_pool_type_init(&global_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+ }
+
+ mm_shrinker.count_objects = ttm_pool_shrinker_count;
+ mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker.seeks = 1;
+ return register_shrinker(&mm_shrinker);
+}
+
+/**
+ * ttm_pool_mgr_fini - Finalize globals
+ *
+ * Cleanup the global pools and unregister the MM shrinker.
+ */
+void ttm_pool_mgr_fini(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_ORDER; ++i) {
+ ttm_pool_type_fini(&global_write_combined[i]);
+ ttm_pool_type_fini(&global_uncached[i]);
+ }
+
+ unregister_shrinker(&mm_shrinker);
+ WARN_ON(!list_empty(&shrinker_list));
+}
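With the old per-device DMA pools removed, a driver that still implements its own
ttm_tt_populate()/ttm_tt_unpopulate() callbacks simply forwards to the pool embedded
in its ttm_bo_device, just like the default path wired up in ttm_tt.c further below.
A minimal sketch; the example_* names are hypothetical and not part of this series:

	/* Forward populate/unpopulate to the per-device page pool. */
	static int example_ttm_tt_populate(struct ttm_bo_device *bdev,
					   struct ttm_tt *ttm,
					   struct ttm_operation_ctx *ctx)
	{
		/* Driver-specific setup would go here. */
		return ttm_pool_alloc(&bdev->pool, ttm, ctx);
	}

	static void example_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
					      struct ttm_tt *ttm)
	{
		/* Driver-specific teardown would go here. */
		ttm_pool_free(&bdev->pool, ttm);
	}
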
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4ebc043e2867..b60699bf4816 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -89,7 +89,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .flags = TTM_OPT_FLAG_FORCE_ALLOC
+ .force_alloc = true
};
struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
diff --git a/drivers/gpu/drm/ttm/ttm_set_memory.h b/drivers/gpu/drm/ttm/ttm_set_memory.h
deleted file mode 100644
index 2343c18a6133..000000000000
--- a/drivers/gpu/drm/ttm/ttm_set_memory.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2018 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Huang Rui <ray.huang@amd.com>
- */
-
-#ifndef TTM_SET_MEMORY
-#define TTM_SET_MEMORY
-
-#include <linux/mm.h>
-
-#ifdef CONFIG_X86
-
-#include <asm/set_memory.h>
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return set_pages_array_wb(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return set_pages_array_wc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return set_pages_array_uc(pages, addrinarray);
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return set_pages_wb(page, numpages);
-}
-
-#else /* for CONFIG_X86 */
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_X86 */
-
-#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 65c4254eea5c..cfd633c7e764 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -37,7 +37,6 @@
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
/**
* Allocates a ttm structure for the given BO.
@@ -52,12 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
if (bo->ttm)
return 0;
- if (bdev->need_dma32)
- page_flags |= TTM_PAGE_FLAG_DMA32;
-
- if (bdev->no_retry)
- page_flags |= TTM_PAGE_FLAG_NO_RETRY;
-
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
@@ -142,7 +135,6 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
- INIT_LIST_HEAD(&ttm->pages_list);
ttm->caching = caching;
}
@@ -216,8 +208,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
swap_space = swap_storage->f_mapping;
gfp_mask = mapping_gfp_mask(swap_space);
- if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_mask |= __GFP_RETRY_MAYFAIL;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = shmem_read_mapping_page_gfp(swap_space, i,
@@ -265,8 +255,6 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
swap_space = swap_storage->f_mapping;
gfp_mask = mapping_gfp_mask(swap_space);
- if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
- gfp_mask |= __GFP_RETRY_MAYFAIL;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
@@ -321,7 +309,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
if (bdev->driver->ttm_tt_populate)
ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
else
- ret = ttm_pool_populate(ttm, ctx);
+ ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (ret)
return ret;
@@ -363,6 +351,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
if (bdev->driver->ttm_tt_unpopulate)
bdev->driver->ttm_tt_unpopulate(bdev, ttm);
else
- ttm_pool_unpopulate(ttm);
+ ttm_pool_free(&bdev->pool, ttm);
ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 931c55126148..322bf7133ba1 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -223,7 +223,7 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
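The driver conversions that follow all apply the same pattern: the CRTC atomic hooks
now receive the full drm_atomic_state and fetch their own old or new CRTC state from
it. A minimal sketch of an atomic_check conversion; example_crtc_atomic_check() is
hypothetical and not taken from this series:

	static int example_crtc_atomic_check(struct drm_crtc *crtc,
					     struct drm_atomic_state *state)
	{
		/* Look up this CRTC's new state from the global atomic state. */
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->enable)
			return 0;

		/* Validate crtc_state exactly as the old hook validated its
		 * drm_crtc_state argument. */
		return 0;
	}
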
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f04f5cc8c839..ea710beb8e00 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -584,18 +584,21 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
}
static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int ret, i;
- ret = vc4_hvs_atomic_check(crtc, state);
+ ret = vc4_hvs_atomic_check(crtc, crtc_state);
if (ret)
return ret;
- for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+ for_each_new_connector_in_state(state, conn, conn_state,
+ i) {
if (conn_state->crtc != crtc)
continue;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7003e7f14a48..5d3d8ed0b775 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -916,7 +916,8 @@ int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 4d0a833366ce..0bd5ea435120 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -414,8 +414,10 @@ void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
}
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index e0e0b72ea65c..34612edcabbd 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -386,16 +386,18 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
};
static int vc4_txp_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
int ret;
- ret = vc4_hvs_atomic_check(crtc, state);
+ ret = vc4_hvs_atomic_check(crtc, crtc_state);
if (ret)
return ret;
- state->no_vblank = true;
+ crtc_state->no_vblank = true;
vc4_state->feed_txp = true;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 48b3194ee051..4bf74836bd53 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -111,13 +111,13 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
return 0;
}
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index e43e4e1b268a..0443b7deeaef 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -168,9 +168,11 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
};
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
struct drm_plane *plane;
struct drm_plane_state *plane_state;
int i = 0, ret;
@@ -178,12 +180,12 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
if (vkms_state->active_planes)
return 0;
- ret = drm_atomic_add_affected_planes(state->state, crtc);
+ ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
if (ret < 0)
return ret;
- drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(state->state,
+ drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
plane);
WARN_ON(!plane_state);
@@ -199,8 +201,8 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
vkms_state->num_active_planes = i;
i = 0;
- drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(state->state,
+ drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
plane);
if (!plane_state->visible)
@@ -226,7 +228,7 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
@@ -237,7 +239,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4860370740e0..0c42d2c05f43 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -595,10 +595,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
else
dev_priv->map_mode = vmw_dma_map_populate;
- if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
- (dev_priv->map_mode == vmw_dma_alloc_coherent))
- return -EINVAL;
-
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
return 0;
}
@@ -798,8 +794,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(ret != 0))
goto out_err0;
- dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
- SCATTERLIST_MAX_SEGMENT));
+ dma_set_max_seg_size(dev->dev, U32_MAX);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
@@ -878,10 +873,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- ret = ttm_bo_device_init(&dev_priv->bdev,
- &vmw_bo_driver,
+ ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
+ dev_priv->dev->dev,
dev->anon_inode->i_mapping,
&dev_priv->vma_manager,
+ dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 312ed0881a99..bc67f2b930e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -522,8 +522,10 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
int connector_mask = drm_connector_mask(&du->connector);
bool has_primary = new_state->plane_mask &
@@ -552,13 +554,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct drm_pending_vblank_event *event = crtc->state->event;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 3ee03227607c..03f3694015ce 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -473,11 +473,11 @@ void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
bool unreference);
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
+ struct drm_atomic_state *state);
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
void vmw_du_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 33e3aa5b18f8..51f70bea41cc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -28,7 +28,6 @@
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
@@ -576,30 +575,11 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct vmw_ttm_tt *vmw_tt =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm);
- struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
- int ret;
-
+ /* TODO: maybe completely drop this? */
if (ttm_tt_is_populated(ttm))
return 0;
- if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
- size_t size =
- ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
- ret = ttm_mem_global_alloc(glob, size, ctx);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
- ctx);
- if (unlikely(ret != 0))
- ttm_mem_global_free(glob, size);
- } else
- ret = ttm_pool_populate(ttm, ctx);
-
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
@@ -607,9 +587,6 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
- struct vmw_private *dev_priv = vmw_tt->dev_priv;
- struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
@@ -617,14 +594,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
}
vmw_ttm_unmap_dma(vmw_tt);
- if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
- size_t size =
- ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-
- ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
- ttm_mem_global_free(glob, size);
- } else
- ttm_pool_unpopulate(ttm);
+ ttm_pool_free(&bdev->pool, ttm);
}
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 0b3bd62e7631..c685d94409b0 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -28,7 +28,6 @@
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
@@ -1316,8 +1315,7 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
snprintf(dma_channel_name, sizeof(dma_channel_name),
"%s%u", dma_names[layer->id], i);
- dma->chan = of_dma_request_slave_channel(disp->dev->of_node,
- dma_channel_name);
+ dma->chan = dma_request_chan(disp->dev, dma_channel_name);
if (IS_ERR(dma->chan)) {
dev_err(disp->dev, "failed to request dma channel\n");
ret = PTR_ERR(dma->chan);
@@ -1506,21 +1504,21 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
}
static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
- return drm_atomic_add_affected_planes(state->state, crtc);
+ return drm_atomic_add_affected_planes(state, crtc);
}
static void
zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
}
static void
zynqmp_disp_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
if (crtc->state->event) {
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index d2a529eba3c9..904f62f3bfc1 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -473,7 +473,7 @@ static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_pending_vblank_event *event = crtc->state->event;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a7a9bc08dcd1..bcfbd0e44a4a 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -417,7 +417,13 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
- if (prot & IOMMU_CACHE)
+ /*
+ * Also Mali has its own notions of shareability wherein its Inner
+ * domain covers the cores within the GPU, and its Outer domain is
+ * "outside the GPU" (i.e. either the Inner or System domain in CPU
+ * terms, depending on coherency).
+ */
+ if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
pte |= ARM_LPAE_PTE_SH_IS;
else
pte |= ARM_LPAE_PTE_SH_OS;
@@ -1021,6 +1027,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
ARM_MALI_LPAE_TTBR_READ_INNER |
ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ if (cfg->coherent_walk)
+ cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
+
return &data->iop;
out_free_data:
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 6a26a364f9bd..d1bb5915082b 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -502,7 +502,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (!fbfont)
return NULL;
- pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
+ pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
fbfont->width, fbfont->height, fbfont->name);
bpc = ((fbfont->width+7)/8) * fbfont->height;
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index f253daa05d9d..e3812a8ff55a 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -240,14 +240,6 @@ static int *MV300_reg = MV300_reg_8bit;
static int inverse;
-extern int fontheight_8x8;
-extern int fontwidth_8x8;
-extern unsigned char fontdata_8x8[];
-
-extern int fontheight_8x16;
-extern int fontwidth_8x16;
-extern unsigned char fontdata_8x16[];
-
/*
* struct fb_ops {
* * open/release and usage marking
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index c0952cc96bdb..aa4ebe3192ec 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -16,7 +16,6 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/fbcon.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index b7a1d6fae90d..1e4cb63d0d11 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -221,14 +221,18 @@ EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product)
{
+ size_t len;
+
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
frame->length = HDMI_SPD_INFOFRAME_SIZE;
- strncpy(frame->vendor, vendor, sizeof(frame->vendor));
- strncpy(frame->product, product, sizeof(frame->product));
+ len = strlen(vendor);
+ memcpy(frame->vendor, vendor, min(len, sizeof(frame->vendor)));
+ len = strlen(product);
+ memcpy(frame->product, product, min(len, sizeof(frame->product)));
return 0;
}
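A minimal usage sketch of the reworked helper (not part of the patch): because the frame is zeroed up front, short strings stay NUL-terminated and over-long ones are simply truncated to the fixed-size vendor/product fields, without strncpy()'s truncation warning. The string literals are made up for illustration.

	#include <linux/hdmi.h>

	static void example_spd_setup(void)
	{
		struct hdmi_spd_infoframe frame;

		/* Over-long strings fill the field and are cut off; no warning. */
		hdmi_spd_infoframe_init(&frame, "ExampleVendorName", "Example Product");
	}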
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d07c851d255b..413fd0ca56a8 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -308,7 +308,6 @@ struct __drm_private_objs_state {
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
@@ -336,6 +335,17 @@ struct drm_atomic_state {
* drm_atomic_crtc_needs_modeset().
*/
bool allow_modeset : 1;
+ /**
+ * @legacy_cursor_update:
+ *
+ * Hint to enforce legacy cursor IOCTL semantics.
+ *
+ * WARNING: This is thoroughly broken and pretty much impossible to
+ * implement correctly. Drivers must ignore this and should instead
+ * implement &drm_plane_helper_funcs.atomic_async_check and
+ * &drm_plane_helper_funcs.atomic_async_update hooks. New users of this
+ * flag are not allowed.
+ */
bool legacy_cursor_update : 1;
bool async_update : 1;
/**
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index bde42988c4b5..f2de050085be 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -336,8 +336,7 @@ struct drm_crtc_helper_funcs {
*
* This function is called in the check phase of an atomic update. The
* driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * state object passed-in.
*
* Also beware that userspace can request its own custom modes, neither
* core nor helpers filter modes to the list of probe modes reported by
@@ -353,7 +352,7 @@ struct drm_crtc_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_begin:
@@ -374,7 +373,7 @@ struct drm_crtc_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_flush:
*
@@ -398,7 +397,7 @@ struct drm_crtc_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_enable:
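A rough sketch (not part of the patch) of what a driver callback looks like under the new vtable signatures above; the pattern mirrors the vkms and vc4 conversions earlier in this diff. The foo_* name is hypothetical.

	#include <drm/drm_atomic.h>
	#include <drm/drm_crtc.h>

	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
					 struct drm_atomic_state *state)
	{
		/* Look up this CRTC's new state from the full atomic state. */
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(state, crtc);

		if (!crtc_state->enable)
			return 0;

		/* Pull the affected planes into the commit, as several of the
		 * converted drivers do. */
		return drm_atomic_add_affected_planes(state, crtc);
	}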
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 37102e45e496..5ddad88ae6ed 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -195,8 +195,12 @@ struct ttm_bo_kmap_obj {
*
* @interruptible: Sleep interruptible if sleeping.
* @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL flag when allocating pages.
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
+ * BOs share the same reservation object.
+ * @force_alloc: Don't check the memory account during suspend or CPU page
+ * faults. Should only be used by TTM internally.
* @resv: Reservation object to allow reserved evictions with.
- * @flags: Including the following flags
*
* Context for TTM operations like changing buffer placement or general memory
* allocation.
@@ -204,16 +208,13 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
+ bool gfp_retry_mayfail;
+ bool allow_res_evict;
+ bool force_alloc;
struct dma_resv *resv;
uint64_t bytes_moved;
- uint32_t flags;
};
-/* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
-/* when serving page fault or suspend, allow alloc anyway */
-#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
-
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
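A sketch of what the flags-to-booleans change means for callers (not part of the patch): code that previously or'ed TTM_OPT_FLAG_* values into ctx.flags now sets the named booleans directly.

	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		/* was: .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT */
		.allow_res_evict = true,
	};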
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 29f6a1d1c853..da8208f43378 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,6 +42,7 @@
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
+#include "ttm_pool.h"
/**
* struct ttm_bo_driver
@@ -275,7 +276,6 @@ extern struct ttm_bo_global {
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
- * @no_retry: Don't retry allocation if it fails
*
*/
@@ -295,6 +295,7 @@ struct ttm_bo_device {
* Protected by internal locks.
*/
struct drm_vma_offset_manager *vma_manager;
+ struct ttm_pool pool;
/*
* Protected by the global:lru lock.
@@ -312,10 +313,6 @@ struct ttm_bo_device {
*/
struct delayed_work wq;
-
- bool need_dma32;
-
- bool no_retry;
};
static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
@@ -395,11 +392,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @bdev: A pointer to a struct ttm_bo_device to initialize.
* @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
* @mapping: The address space to use for this bo.
* @vma_manager: A pointer to a vma manager.
- * @file_page_offset: Offset into the device address space that is available
- * for buffer data. This ensures compatibility with other users of the
- * address space.
+ * @use_dma_alloc: Whether the coherent DMA allocation API should be used.
+ * @use_dma32: Whether GFP_DMA32 should be used for device memory allocations.
*
* Initializes a struct ttm_bo_device:
* Returns:
@@ -407,9 +404,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
*/
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
+ struct device *dev,
struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool need_dma32);
+ bool use_dma_alloc, bool use_dma32);
/**
* ttm_bo_unmap_virtual
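As a hedged sketch (not part of the patch), a driver's init call under the new signature looks roughly like the converted vmwgfx call earlier in this diff; everything except ttm_bo_device_init() itself is a hypothetical placeholder.

	ret = ttm_bo_device_init(&priv->bdev, &foo_bo_driver,
				 ddev->dev,			/* struct device used for DMA */
				 ddev->anon_inode->i_mapping,
				 &priv->vma_manager,
				 priv->wants_coherent,		/* use_dma_alloc */
				 false);			/* use_dma32 */
	if (ret)
		return ret;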
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index 161624dcf6be..a0b4a49fa432 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -25,6 +25,8 @@
#ifndef _TTM_CACHING_H_
#define _TTM_CACHING_H_
+#define TTM_NUM_CACHING_TYPES 3
+
enum ttm_caching {
ttm_uncached,
ttm_write_combined,
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
deleted file mode 100644
index 8fa1e7df6213..000000000000
--- a/include/drm/ttm/ttm_page_alloc.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- * Jerome Glisse <jglisse@redhat.com>
- */
-#ifndef TTM_PAGE_ALLOC
-#define TTM_PAGE_ALLOC
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_memory.h>
-
-struct device;
-
-/**
- * Initialize pool allocator.
- */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-/**
- * Free pool allocator.
- */
-void ttm_page_alloc_fini(void);
-
-/**
- * ttm_pool_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_pool_unpopulate:
- *
- * @ttm: The struct ttm_tt which to free backing pages.
- *
- * Free all pages of @ttm
- */
-void ttm_pool_unpopulate(struct ttm_tt *ttm);
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
- struct ttm_operation_ctx *ctx);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
-#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
-/**
- * Initialize pool allocator.
- */
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-
-/**
- * Free pool allocator.
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
- struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
- unsigned max_pages)
-{
- return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- return 0;
-}
-static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
- struct device *dev,
- struct ttm_operation_ctx *ctx)
-{
- return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
- struct device *dev)
-{
-}
-#endif
-
-#endif
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
new file mode 100644
index 000000000000..4321728bdd11
--- /dev/null
+++ b/include/drm/ttm/ttm_pool.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_PAGE_POOL_H_
+#define _TTM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+#include <drm/ttm/ttm_caching.h>
+
+struct device;
+struct ttm_tt;
+struct ttm_pool;
+struct ttm_operation_ctx;
+
+/**
+ * ttm_pool_type - Pool for a certain memory type
+ *
+ * @pool: the pool we belong to, might be NULL for the global ones
+ * @order: the allocation order our pages have
+ * @caching: the caching type our pages have
+ * @shrinker_list: our place on the global shrinker list
+ * @lock: protection of the page list
+ * @pages: the list of pages in the pool
+ */
+struct ttm_pool_type {
+ struct ttm_pool *pool;
+ unsigned int order;
+ enum ttm_caching caching;
+
+ struct list_head shrinker_list;
+
+ spinlock_t lock;
+ struct list_head pages;
+};
+
+/**
+ * ttm_pool - Pool for all caching and orders
+ *
+ * @dev: the device we allocate pages for
+ * @use_dma_alloc: if coherent DMA allocations should be used
+ * @use_dma32: if GFP_DMA32 should be used
+ * @caching: pools for each caching/order
+ */
+struct ttm_pool {
+ struct device *dev;
+
+ bool use_dma_alloc;
+ bool use_dma32;
+
+ struct {
+ struct ttm_pool_type orders[MAX_ORDER];
+ } caching[TTM_NUM_CACHING_TYPES];
+};
+
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx);
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
+
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_pool_fini(struct ttm_pool *pool);
+
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+
+int ttm_pool_mgr_init(unsigned long num_pages);
+void ttm_pool_mgr_fini(void);
+
+#endif
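A minimal sketch (not part of the patch) of how a driver plugs the new pool into its TTM backing-store hooks; it mirrors the vmwgfx conversion earlier in this diff, with foo_* as hypothetical names.

	/* Populate hook, cf. vmw_ttm_populate() above. */
	static int foo_ttm_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
				    struct ttm_operation_ctx *ctx)
	{
		if (ttm_tt_is_populated(ttm))
			return 0;

		return ttm_pool_alloc(&bdev->pool, ttm, ctx);
	}

	/* Matching unpopulate hook, cf. vmw_ttm_unpopulate() above. */
	static void foo_ttm_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
	{
		ttm_pool_free(&bdev->pool, ttm);
	}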
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index df9a80650feb..da27e9d8fa64 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -37,7 +37,6 @@ struct ttm_operation_ctx;
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_DMA32 (1 << 7)
#define TTM_PAGE_FLAG_SG (1 << 8)
#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
@@ -66,7 +65,6 @@ struct ttm_tt {
struct sg_table *sg;
dma_addr_t *dma_address;
struct file *swap_storage;
- struct list_head pages_list;
enum ttm_caching caching;
};
diff --git a/include/linux/font.h b/include/linux/font.h
index b5b312c19e46..4f50d736ea72 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -16,7 +16,7 @@
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
const void *data;
int pref;
};
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 36c47e7e66a2..6f70572b2938 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -19,12 +19,6 @@ struct scatterlist {
};
/*
- * Since the above length field is an unsigned int, below we define the maximum
- * length in bytes that can be stored in one scatterlist entry.
- */
-#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
-
-/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
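Not part of the patch: with SCATTERLIST_MAX_SEGMENT gone, callers that used it to clamp the DMA segment size just pass the full 32-bit range, as the vmwgfx hunk above does.

	/* was: dma_set_max_seg_size(dev, min_t(unsigned int,
	 *				U32_MAX & PAGE_MASK,
	 *				SCATTERLIST_MAX_SEGMENT)); */
	dma_set_max_seg_size(dev, U32_MAX);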
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
index b2c7e9f7b8d3..d264bf853034 100644
--- a/tools/testing/scatterlist/main.c
+++ b/tools/testing/scatterlist/main.c
@@ -50,7 +50,7 @@ static void fail(struct test *test, struct sg_table *st, const char *cond)
int main(void)
{
- const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
+ const unsigned int sgmax = UINT_MAX;
struct test *test, tests[] = {
{ -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
{ -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },