From 7904bcdcf6b56602a049ed2b47282db63671fa99 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 5 Feb 2025 03:12:51 +1000 Subject: drm/nouveau/gsp: fix rm shutdown wait condition Though the initial upstreamed GSP-RM version in nouveau was 535.113.01, the code was developed against earlier versions. 535.42.02 modified the mailbox value used by GSP-RM to signal shutdown has completed, which was missed at the time. I'm not aware of any issues caused by this, but noticed the bug while working on GB20x support. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 969f6b921fdb..64b58efd3132 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -2844,7 +2844,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) return ret; nvkm_msec(gsp->subdev.device, 2000, - if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000) + if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000) break; ); -- cgit v1.2.3 From b8a90901db9d2b50093e105f2e016624c627c610 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 13 May 2025 05:48:56 +1000 Subject: drm/nouveau/gsp: remove gsp-specific chid allocation path In order to specify a channel ID to RM during channel allocation, the channel ID is broken down into a "userd page" index and an index into that page. It was assumed that RM would enforce that the same physical block of memory be used for all CHIDs within a "userd page", and the GSP paths override NVKM's normal CHID allocation to handle this. However, none of that turns out to be necessary. Remove the GSP-specific code and use the regular CHID allocation path. 
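For reference, the "userd page" split described above amounts to an integer divide and modulo by the number of channels per userd page, mirroring the (userd->chid * CHID_PER_USERD) + chid arithmetic in the code removed below. A minimal sketch, with invented helper names (these are not nvkm functions):

  /*
   * Illustration only: map a channel ID to a userd page plus a slot
   * within that page, and back.  chid_per_userd stands in for the
   * driver's CHID_PER_USERD constant.
   */
  struct userd_loc {
  	unsigned int page;	/* which userd page */
  	unsigned int slot;	/* entry within that page */
  };

  static struct userd_loc
  chid_to_userd_loc(unsigned int chid, unsigned int chid_per_userd)
  {
  	struct userd_loc loc = {
  		.page = chid / chid_per_userd,
  		.slot = chid % chid_per_userd,
  	};
  	return loc;
  }

  static unsigned int
  userd_loc_to_chid(struct userd_loc loc, unsigned int chid_per_userd)
  {
  	/* Same form as (userd->chid * CHID_PER_USERD) + chid below. */
  	return loc.page * chid_per_userd + loc.slot;
  }
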
Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 3 - drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | 5 - drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 46 ++++----- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | 3 - drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c | 115 --------------------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2 +- 6 files changed, 20 insertions(+), 154 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index be508f65b280..96c16cfccf16 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -78,9 +78,6 @@ struct nvkm_fifo { struct { struct nvkm_memory *mem; struct nvkm_vma *bar1; - - struct mutex mutex; - struct list_head list; } userd; struct { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index 22443fe4a39f..3c2ca711dc5c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c @@ -349,8 +349,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine) nvkm_chid_unref(&fifo->cgid); nvkm_chid_unref(&fifo->chid); - mutex_destroy(&fifo->userd.mutex); - nvkm_event_fini(&fifo->nonstall.event); mutex_destroy(&fifo->mutex); @@ -391,8 +389,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, spin_lock_init(&fifo->lock); mutex_init(&fifo->mutex); - INIT_LIST_HEAD(&fifo->userd.list); - mutex_init(&fifo->userd.mutex); - return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index 7d4716dcd512..78be7abc90d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c @@ -275,11 +275,7 @@ nvkm_chan_del(struct nvkm_chan **pchan) nvkm_gpuobj_del(&chan->ramfc); if (chan->cgrp) { - if (!chan->func->id_put) - nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock); - else - chan->func->id_put(chan); - + nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock); nvkm_cgrp_unref(&chan->cgrp); } @@ -441,30 +437,26 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru } /* Allocate channel ID. 
*/ - if (!chan->func->id_get) { - chan->id = nvkm_chid_get(runl->chid, chan); - if (chan->id >= 0) { - if (func->userd->bar < 0) { - if (ouserd + chan->func->userd->size >= - nvkm_memory_size(userd)) { - RUNL_DEBUG(runl, "ouserd %llx", ouserd); - return -EINVAL; - } - - ret = nvkm_memory_kmap(userd, &chan->userd.mem); - if (ret) { - RUNL_DEBUG(runl, "userd %d", ret); - return ret; - } - - chan->userd.base = ouserd; - } else { - chan->userd.mem = nvkm_memory_ref(fifo->userd.mem); - chan->userd.base = chan->id * chan->func->userd->size; + chan->id = nvkm_chid_get(runl->chid, chan); + if (chan->id >= 0) { + if (func->userd->bar < 0) { + if (ouserd + chan->func->userd->size >= + nvkm_memory_size(userd)) { + RUNL_DEBUG(runl, "ouserd %llx", ouserd); + return -EINVAL; + } + + ret = nvkm_memory_kmap(userd, &chan->userd.mem); + if (ret) { + RUNL_DEBUG(runl, "userd %d", ret); + return ret; } + + chan->userd.base = ouserd; + } else { + chan->userd.mem = nvkm_memory_ref(fifo->userd.mem); + chan->userd.base = chan->id * chan->func->userd->size; } - } else { - chan->id = chan->func->id_get(chan, userd, ouserd); } if (chan->id < 0) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h index 013682a709d5..85b94f699128 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h @@ -17,9 +17,6 @@ struct nvkm_cctx { }; struct nvkm_chan_func { - int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd); - void (*id_put)(struct nvkm_chan *); - const struct nvkm_chan_func_inst { u32 size; bool zero; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c index 3454c7d29502..129f274c9bfd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c @@ -215,123 +215,8 @@ r535_chan_ramfc = { .priv = true, }; -struct r535_chan_userd { - struct nvkm_memory *mem; - struct nvkm_memory *map; - int chid; - u32 used; - - struct list_head head; -} *userd; - -static void -r535_chan_id_put(struct nvkm_chan *chan) -{ - struct nvkm_runl *runl = chan->cgrp->runl; - struct nvkm_fifo *fifo = runl->fifo; - struct r535_chan_userd *userd; - - mutex_lock(&fifo->userd.mutex); - list_for_each_entry(userd, &fifo->userd.list, head) { - if (userd->map == chan->userd.mem) { - u32 chid = chan->userd.base / chan->func->userd->size; - - userd->used &= ~BIT(chid); - if (!userd->used) { - nvkm_memory_unref(&userd->map); - nvkm_memory_unref(&userd->mem); - nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock); - list_del(&userd->head); - kfree(userd); - } - - break; - } - } - mutex_unlock(&fifo->userd.mutex); - -} - -static int -r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd) -{ - const u32 userd_size = CHID_PER_USERD * chan->func->userd->size; - struct nvkm_runl *runl = chan->cgrp->runl; - struct nvkm_fifo *fifo = runl->fifo; - struct r535_chan_userd *userd; - u32 chid; - int ret; - - if (ouserd + chan->func->userd->size >= userd_size || - (ouserd & (chan->func->userd->size - 1))) { - RUNL_DEBUG(runl, "ouserd %llx", ouserd); - return -EINVAL; - } - - chid = div_u64(ouserd, chan->func->userd->size); - - list_for_each_entry(userd, &fifo->userd.list, head) { - if (userd->mem == muserd) { - if (userd->used & BIT(chid)) - return -EBUSY; - break; - } - } - - if (&userd->head == &fifo->userd.list) { - if (nvkm_memory_size(muserd) < userd_size) { - RUNL_DEBUG(runl, "userd too 
small"); - return -EINVAL; - } - - userd = kzalloc(sizeof(*userd), GFP_KERNEL); - if (!userd) - return -ENOMEM; - - userd->chid = nvkm_chid_get(runl->chid, chan); - if (userd->chid < 0) { - ret = userd->chid; - kfree(userd); - return ret; - } - - userd->mem = nvkm_memory_ref(muserd); - - ret = nvkm_memory_kmap(userd->mem, &userd->map); - if (ret) { - nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock); - kfree(userd); - return ret; - } - - - list_add(&userd->head, &fifo->userd.list); - } - - userd->used |= BIT(chid); - - chan->userd.mem = nvkm_memory_ref(userd->map); - chan->userd.base = ouserd; - - return (userd->chid * CHID_PER_USERD) + chid; -} - -static int -r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd) -{ - struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; - int ret; - - mutex_lock(&fifo->userd.mutex); - ret = r535_chan_id_get_locked(chan, muserd, ouserd); - mutex_unlock(&fifo->userd.mutex); - return ret; -} - static const struct nvkm_chan_func r535_chan = { - .id_get = r535_chan_id_get, - .id_put = r535_chan_id_put, .inst = &gf100_chan_inst, .userd = &gv100_chan_userd, .ramfc = &r535_chan_ramfc, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 64b58efd3132..2bb726c0c49f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1932,7 +1932,7 @@ r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, msg->partitionAttributionId); - chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); + chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid, &flags); if (!chan) { nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); return 0; -- cgit v1.2.3 From 52cae0b140d3d3f46589f62155000476bee63e73 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Nov 2024 10:46:40 +1000 Subject: drm/nouveau/ce: bump max instances to 20 560.28.03 supports more copy engine instances. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/core/layout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h index 9d2a1abf64f9..4e027c5b00c3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h @@ -29,7 +29,7 @@ NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3) NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla) NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp) -NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10) +NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 20) NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher) NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp) NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma) -- cgit v1.2.3 From 9cc5c1951b9bb497b9fa48e7e0db0b7fba948fdf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 18 Nov 2024 09:47:14 +1000 Subject: drm/nouveau/nvenc: bump max instances to 4 570.86.16 supports more NVENC instances. 
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/core/layout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h index 4e027c5b00c3..33e3bc519b9b 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h @@ -43,7 +43,7 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec) NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp) NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld) NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8) -NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3) +NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 4) NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8) NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa) NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec) -- cgit v1.2.3 From 4848de6e4161886c4ab55aa68d5777c449a683c2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 21 Feb 2025 06:26:13 +1000 Subject: drm/nouveau/ofa: bump max instances to 2 560.28.03 supports more OFA instances. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/core/layout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h index 33e3bc519b9b..2debef27bd95 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h @@ -45,7 +45,7 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld) NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8) NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 4) NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8) -NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa) +NVKM_LAYOUT_INST(NVKM_ENGINE_OFA , struct nvkm_engine , ofa, 2) NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec) NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2) NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw) -- cgit v1.2.3 From 8a8b1ec5261f20d86c76c8fb235ee2441744bc10 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:36 +1000 Subject: drm/nouveau/gsp: split rpc handling out on its own Later patches in the series add HALs around various RM APIs in order to support a newer version of GSP-RM firmware. In order to do this, begin by splitting the code up into "modules" that roughly represent RM's API boundaries so they can be more easily managed. Aside from moving the RPC function pointers, no code change is intended.
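The mechanical shape of the change: the flat rpc_get/rpc_push/rpc_done pointers on struct nvkm_gsp_rm are replaced by a nested, per-module ops table reached through rm->api, so a later firmware version can plug in a different struct nvkm_rm_api while the inline nvkm_gsp_rpc_*() wrappers stay the same. Condensed from the structures this patch adds (see rm.h and rm.c below); it assumes the surrounding nvkm/gsp headers:

  struct nvkm_rm_api {
  	const struct nvkm_rm_api_rpc {
  		void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc);
  		void *(*push)(struct nvkm_gsp *, void *argv,
  			      enum nvkm_gsp_rpc_reply_policy, u32 repc);
  		void (*done)(struct nvkm_gsp *, void *repv);
  	} *rpc;
  	/* later patches hang further per-module tables here (ctrl, ...) */
  };

  /* r535 supplies one instance of the table ... */
  const struct nvkm_rm_api r535_rm = { .rpc = &r535_rpc };

  /* ... and callers keep using the unchanged inline wrappers, e.g.: */
  static inline void *
  nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
  {
  	return gsp->rm->api->rpc->get(gsp, fn, argc);
  }
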
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/Kbuild | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 13 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 665 +------------------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 5 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 6 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 10 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c | 692 +++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 20 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h | 18 + 10 files changed, 762 insertions(+), 670 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild index 7b863355c5c6..0759ba15954b 100644 --- a/drivers/gpu/drm/nouveau/Kbuild +++ b/drivers/gpu/drm/nouveau/Kbuild @@ -2,6 +2,7 @@ ccflags-y += -I $(src)/include ccflags-y += -I $(src)/include/nvkm ccflags-y += -I $(src)/nvkm +ccflags-y += -I $(src)/nvkm/subdev/gsp ccflags-y += -I $(src) # NVKM - HW resource manager diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 1c12854a8550..b543c31d3d32 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -210,10 +210,7 @@ struct nvkm_gsp { } gr; const struct nvkm_gsp_rm { - void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc); - void *(*rpc_push)(struct nvkm_gsp *gsp, void *argv, - enum nvkm_gsp_rpc_reply_policy policy, u32 repc); - void (*rpc_done)(struct nvkm_gsp *gsp, void *repv); + const struct nvkm_rm_api *api; void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc); int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc); @@ -272,17 +269,19 @@ nvkm_gsp_rm(struct nvkm_gsp *gsp) return gsp && (gsp->fws.rm || gsp->fw.img); } +#include + static inline void * nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) { - return gsp->rm->rpc_get(gsp, fn, argc); + return gsp->rm->api->rpc->get(gsp, fn, argc); } static inline void * nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, enum nvkm_gsp_rpc_reply_policy policy, u32 repc) { - return gsp->rm->rpc_push(gsp, argv, policy, repc); + return gsp->rm->api->rpc->push(gsp, argv, policy, repc); } static inline void * @@ -311,7 +310,7 @@ nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, static inline void nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) { - gsp->rm->rpc_done(gsp, repv); + gsp->rm->api->rpc->done(gsp, repv); } static inline void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index 16bf2f1bb780..af6e55603763 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -10,3 +10,5 @@ nvkm-y += nvkm/subdev/gsp/ga102.o nvkm-y += nvkm/subdev/gsp/ad102.o nvkm-y += nvkm/subdev/gsp/r535.o + +include $(src)/nvkm/subdev/gsp/rm/Kbuild diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 2bb726c0c49f..745d43586bad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -19,6 +19,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ +#include #include "priv.h" #include @@ -60,578 +61,6 @@ extern struct dentry *nouveau_debugfs_root; -#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE -#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16) - -/** - * DOC: GSP message queue element - * - * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h - * - * The GSP command queue and status queue are message queues for the - * communication between software and GSP. The software submits the GSP - * RPC via the GSP command queue, GSP writes the status of the submitted - * RPC in the status queue. - * - * A GSP message queue element consists of three parts: - * - * - message element header (struct r535_gsp_msg), which mostly maintains - * the metadata for queuing the element. - * - * - RPC message header (struct nvfw_gsp_rpc), which maintains the info - * of the RPC. E.g., the RPC function number. - * - * - The payload, where the RPC message stays. E.g. the params of a - * specific RPC function. Some RPC functions also have their headers - * in the payload. E.g. rm_alloc, rm_control. - * - * The memory layout of a GSP message element can be illustrated below:: - * - * +------------------------+ - * | Message Element Header | - * | (r535_gsp_msg) | - * | | - * | (r535_gsp_msg.data) | - * | | | - * |----------V-------------| - * | GSP RPC Header | - * | (nvfw_gsp_rpc) | - * | | - * | (nvfw_gsp_rpc.data) | - * | | | - * |----------V-------------| - * | Payload | - * | | - * | header(optional) | - * | params | - * +------------------------+ - * - * The max size of a message queue element is 16 pages (including the - * headers). When a GSP message to be sent is larger than 16 pages, the - * message should be split into multiple elements and sent accordingly. - * - * In the bunch of the split elements, the first element has the expected - * function number, while the rest of the elements are sent with the - * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD. - * - * GSP consumes the elements from the cmdq and always writes the result - * back to the msgq. The result is also formed as split elements. 
- * - * Terminology: - * - * - gsp_msg(msg): GSP message element (element header + GSP RPC header + - * payload) - * - gsp_rpc(rpc): GSP RPC (RPC header + payload) - * - gsp_rpc_buf: buffer for (GSP RPC header + payload) - * - gsp_rpc_len: size of (GSP RPC header + payload) - * - params_size: size of params in the payload - * - payload_size: size of (header if exists + params) in the payload - */ - -struct r535_gsp_msg { - u8 auth_tag_buffer[16]; - u8 aad_buffer[16]; - u32 checksum; - u32 sequence; - u32 elem_count; - u32 pad; - u8 data[]; -}; - -struct nvfw_gsp_rpc { - u32 header_version; - u32 signature; - u32 length; - u32 function; - u32 rpc_result; - u32 rpc_result_private; - u32 sequence; - union { - u32 spare; - u32 cpuRmGfid; - }; - u8 data[]; -}; - -#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) - -#define to_gsp_hdr(p, header) \ - container_of((void *)p, typeof(*header), data) - -#define to_payload_hdr(p, header) \ - container_of((void *)p, typeof(*header), params) - -static int -r535_rpc_status_to_errno(uint32_t rpc_status) -{ - switch (rpc_status) { - case 0x55: /* NV_ERR_NOT_READY */ - case 0x66: /* NV_ERR_TIMEOUT_RETRY */ - return -EBUSY; - case 0x51: /* NV_ERR_NO_MEMORY */ - return -ENOMEM; - default: - return -EINVAL; - } -} - -static int -r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime) -{ - u32 size, rptr = *gsp->msgq.rptr; - int used; - - size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len, - GSP_PAGE_SIZE); - if (WARN_ON(!size || size >= gsp->msgq.cnt)) - return -EINVAL; - - do { - u32 wptr = *gsp->msgq.wptr; - - used = wptr + gsp->msgq.cnt - rptr; - if (used >= gsp->msgq.cnt) - used -= gsp->msgq.cnt; - if (used >= size) - break; - - usleep_range(1, 2); - } while (--(*ptime)); - - if (WARN_ON(!*ptime)) - return -ETIMEDOUT; - - return used; -} - -static struct r535_gsp_msg * -r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp) -{ - u32 rptr = *gsp->msgq.rptr; - - /* Skip the first page, which is the message queue info */ - return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE + - rptr * GSP_PAGE_SIZE); -} - -/** - * DOC: Receive a GSP message queue element - * - * Receiving a GSP message queue element from the message queue consists of - * the following steps: - * - * - Peek the element from the queue: r535_gsp_msgq_peek(). - * Peek the first page of the element to determine the total size of the - * message before allocating the proper memory. - * - * - Allocate memory for the message. - * Once the total size of the message is determined from the GSP message - * queue element, the caller of r535_gsp_msgq_recv() allocates the - * required memory. - * - * - Receive the message: r535_gsp_msgq_recv(). - * Copy the message into the allocated memory. Advance the read pointer. - * If the message is a large GSP message, r535_gsp_msgq_recv() calls - * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts - * until the complete message is received. - * r535_gsp_msgq_recv() assembles the payloads of cotinuation parts into - * the return of the large GSP message. - * - * - Free the allocated memory: r535_gsp_msg_done(). - * The user is responsible for freeing the memory allocated for the GSP - * message pages after they have been processed. 
- */ -static void * -r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) -{ - struct r535_gsp_msg *mqe; - int ret; - - ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries); - if (ret < 0) - return ERR_PTR(ret); - - mqe = r535_gsp_msgq_get_entry(gsp); - - return mqe->data; -} - -struct r535_gsp_msg_info { - int *retries; - u32 gsp_rpc_len; - void *gsp_rpc_buf; - bool continuation; -}; - -static void -r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl); - -static void * -r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp, - struct r535_gsp_msg_info *info) -{ - u8 *buf = info->gsp_rpc_buf; - u32 rptr = *gsp->msgq.rptr; - struct r535_gsp_msg *mqe; - u32 size, expected, len; - int ret; - - expected = info->gsp_rpc_len; - - ret = r535_gsp_msgq_wait(gsp, expected, info->retries); - if (ret < 0) - return ERR_PTR(ret); - - mqe = r535_gsp_msgq_get_entry(gsp); - - if (info->continuation) { - struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data; - - if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) { - nvkm_error(&gsp->subdev, - "Not a continuation of a large RPC\n"); - r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); - return ERR_PTR(-EIO); - } - } - - size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE); - - len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); - len = min_t(u32, expected, len); - - if (info->continuation) - memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc), - len - sizeof(struct nvfw_gsp_rpc)); - else - memcpy(buf, mqe->data, len); - - expected -= len; - - if (expected) { - mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); - memcpy(buf + len, mqe, expected); - } - - rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt; - - mb(); - (*gsp->msgq.rptr) = rptr; - return buf; -} - -static void * -r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) -{ - struct r535_gsp_msg *mqe; - const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe); - struct nvfw_gsp_rpc *rpc; - struct r535_gsp_msg_info info = {0}; - u32 expected = gsp_rpc_len; - void *buf; - - mqe = r535_gsp_msgq_get_entry(gsp); - rpc = (struct nvfw_gsp_rpc *)mqe->data; - - if (WARN_ON(rpc->length > max_rpc_size)) - return NULL; - - buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL); - if (!buf) - return ERR_PTR(-ENOMEM); - - info.gsp_rpc_buf = buf; - info.retries = retries; - info.gsp_rpc_len = rpc->length; - - buf = r535_gsp_msgq_recv_one_elem(gsp, &info); - if (IS_ERR(buf)) { - kvfree(info.gsp_rpc_buf); - info.gsp_rpc_buf = NULL; - return buf; - } - - if (expected <= max_rpc_size) - return buf; - - info.gsp_rpc_buf += info.gsp_rpc_len; - expected -= info.gsp_rpc_len; - - while (expected) { - u32 size; - - rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries); - if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); - return rpc; - } - - info.gsp_rpc_len = rpc->length; - info.continuation = true; - - rpc = r535_gsp_msgq_recv_one_elem(gsp, &info); - if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); - return rpc; - } - - size = info.gsp_rpc_len - sizeof(*rpc); - expected -= size; - info.gsp_rpc_buf += size; - } - - rpc = buf; - rpc->length = gsp_rpc_len; - return buf; -} - -static int -r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc) -{ - struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); - struct r535_gsp_msg *cqe; - u32 gsp_rpc_len = msg->checksum; - u64 *ptr = (void *)msg; - u64 *end; - u64 csum = 0; - int free, time = 1000000; - u32 wptr, size, step, len; - u32 off = 0; - - len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, 
GSP_PAGE_SIZE); - - end = (u64 *)((char *)ptr + len); - msg->pad = 0; - msg->checksum = 0; - msg->sequence = gsp->cmdq.seq++; - msg->elem_count = DIV_ROUND_UP(len, 0x1000); - - while (ptr < end) - csum ^= *ptr++; - - msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); - - wptr = *gsp->cmdq.wptr; - do { - do { - free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; - if (free >= gsp->cmdq.cnt) - free -= gsp->cmdq.cnt; - if (free >= 1) - break; - - usleep_range(1, 2); - } while(--time); - - if (WARN_ON(!time)) { - kvfree(msg); - return -ETIMEDOUT; - } - - cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); - step = min_t(u32, free, (gsp->cmdq.cnt - wptr)); - size = min_t(u32, len, step * GSP_PAGE_SIZE); - - memcpy(cqe, (u8 *)msg + off, size); - - wptr += DIV_ROUND_UP(size, 0x1000); - if (wptr == gsp->cmdq.cnt) - wptr = 0; - - off += size; - len -= size; - } while (len); - - nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); - wmb(); - (*gsp->cmdq.wptr) = wptr; - mb(); - - nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); - - kvfree(msg); - return 0; -} - -static void * -r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len) -{ - struct r535_gsp_msg *msg; - u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len; - - size = ALIGN(size, GSP_MSG_MIN_SIZE); - msg = kvzalloc(size, GFP_KERNEL); - if (!msg) - return ERR_PTR(-ENOMEM); - - msg->checksum = gsp_rpc_len; - return msg->data; -} - -static void -r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) -{ - kvfree(msg); -} - -static void -r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) -{ - if (gsp->subdev.debug >= lvl) { - nvkm_printk__(&gsp->subdev, lvl, info, - "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", - msg->function, msg->length, msg->length - sizeof(*msg), - msg->rpc_result, msg->rpc_result_private); - print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, - msg->data, msg->length - sizeof(*msg), true); - } -} - -static struct nvfw_gsp_rpc * -r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len) -{ - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvfw_gsp_rpc *rpc; - int retries = 4000000, i; - -retry: - rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries); - if (IS_ERR_OR_NULL(rpc)) - return rpc; - - rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries); - if (IS_ERR_OR_NULL(rpc)) - return rpc; - - if (rpc->rpc_result) { - r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); - r535_gsp_msg_done(gsp, rpc); - return ERR_PTR(-EINVAL); - } - - r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE); - - if (fn && rpc->function == fn) { - if (gsp_rpc_len) { - if (rpc->length < gsp_rpc_len) { - nvkm_error(subdev, "rpc len %d < %d\n", - rpc->length, gsp_rpc_len); - r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); - r535_gsp_msg_done(gsp, rpc); - return ERR_PTR(-EIO); - } - - return rpc; - } - - r535_gsp_msg_done(gsp, rpc); - return NULL; - } - - for (i = 0; i < gsp->msgq.ntfy_nr; i++) { - struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; - - if (ntfy->fn == rpc->function) { - if (ntfy->func) - ntfy->func(ntfy->priv, ntfy->fn, rpc->data, - rpc->length - sizeof(*rpc)); - break; - } - } - - if (i == gsp->msgq.ntfy_nr) - r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN); - - r535_gsp_msg_done(gsp, rpc); - if (fn) - goto retry; - - if (*gsp->msgq.rptr != *gsp->msgq.wptr) - goto retry; - - return NULL; -} - -static int -r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) -{ - int ret = 0; - - mutex_lock(&gsp->msgq.mutex); - if (WARN_ON(gsp->msgq.ntfy_nr >= 
ARRAY_SIZE(gsp->msgq.ntfy))) { - ret = -ENOSPC; - } else { - gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; - gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; - gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; - gsp->msgq.ntfy_nr++; - } - mutex_unlock(&gsp->msgq.mutex); - return ret; -} - -static int -r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) -{ - void *repv; - - mutex_lock(&gsp->cmdq.mutex); - repv = r535_gsp_msg_recv(gsp, fn, 0); - mutex_unlock(&gsp->cmdq.mutex); - if (IS_ERR(repv)) - return PTR_ERR(repv); - - return 0; -} - -static void * -r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, - enum nvkm_gsp_rpc_reply_policy policy, - u32 gsp_rpc_len) -{ - struct nvfw_gsp_rpc *reply; - void *repv = NULL; - - switch (policy) { - case NVKM_GSP_RPC_REPLY_NOWAIT: - break; - case NVKM_GSP_RPC_REPLY_RECV: - reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); - if (!IS_ERR_OR_NULL(reply)) - repv = reply->data; - else - repv = reply; - break; - case NVKM_GSP_RPC_REPLY_POLL: - repv = r535_gsp_msg_recv(gsp, fn, 0); - break; - } - - return repv; -} - -static void * -r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, - enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) -{ - struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); - u32 fn = rpc->function; - int ret; - - if (gsp->subdev.debug >= NV_DBG_TRACE) { - nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, - rpc->length, rpc->length - sizeof(*rpc)); - print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, - rpc->data, rpc->length - sizeof(*rpc), true); - } - - ret = r535_gsp_cmdq_push(gsp, rpc); - if (ret) - return ERR_PTR(ret); - - return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len); -} - static void r535_gsp_event_dtor(struct nvkm_gsp_event *event) { @@ -936,99 +365,9 @@ r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_siz return rpc->params; } -static void -r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) -{ - struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); - - r535_gsp_msg_done(gsp, rpc); -} - -static void * -r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size) -{ - struct nvfw_gsp_rpc *rpc; - - rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size, - sizeof(u64))); - if (IS_ERR(rpc)) - return ERR_CAST(rpc); - - rpc->header_version = 0x03000000; - rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; - rpc->function = fn; - rpc->rpc_result = 0xffffffff; - rpc->rpc_result_private = 0xffffffff; - rpc->length = sizeof(*rpc) + payload_size; - return rpc->data; -} - -static void * -r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, - enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) -{ - struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); - struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); - const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg); - const u32 max_payload_size = max_rpc_size - sizeof(*rpc); - u32 payload_size = rpc->length - sizeof(*rpc); - void *repv; - - mutex_lock(&gsp->cmdq.mutex); - if (payload_size > max_payload_size) { - const u32 fn = rpc->function; - u32 remain_payload_size = payload_size; - - /* Adjust length, and send initial RPC. */ - rpc->length = sizeof(*rpc) + max_payload_size; - msg->checksum = rpc->length; - - repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0); - if (IS_ERR(repv)) - goto done; - - payload += max_payload_size; - remain_payload_size -= max_payload_size; - - /* Remaining chunks sent as CONTINUATION_RECORD RPCs. 
*/ - while (remain_payload_size) { - u32 size = min(remain_payload_size, - max_payload_size); - void *next; - - next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); - if (IS_ERR(next)) { - repv = next; - goto done; - } - - memcpy(next, payload, size); - - repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); - if (IS_ERR(repv)) - goto done; - - payload += size; - remain_payload_size -= size; - } - - /* Wait for reply. */ - repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size + - sizeof(*rpc)); - } else { - repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len); - } - -done: - mutex_unlock(&gsp->cmdq.mutex); - return repv; -} - const struct nvkm_gsp_rm r535_gsp_rm = { - .rpc_get = r535_gsp_rpc_get, - .rpc_push = r535_gsp_rpc_push, - .rpc_done = r535_gsp_rpc_done, + .api = &r535_rm, .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild new file mode 100644 index 000000000000..1c07740215ec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild new file mode 100644 index 000000000000..21c818ec0701 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o +nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c new file mode 100644 index 000000000000..f28b781abc5c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include + +const struct nvkm_rm_api +r535_rm = { + .rpc = &r535_rpc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c new file mode 100644 index 000000000000..ffb4104a7d8c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -0,0 +1,692 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include + +#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE +#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16) + +/** + * DOC: GSP message queue element + * + * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h + * + * The GSP command queue and status queue are message queues for the + * communication between software and GSP. The software submits the GSP + * RPC via the GSP command queue, GSP writes the status of the submitted + * RPC in the status queue. + * + * A GSP message queue element consists of three parts: + * + * - message element header (struct r535_gsp_msg), which mostly maintains + * the metadata for queuing the element. + * + * - RPC message header (struct nvfw_gsp_rpc), which maintains the info + * of the RPC. E.g., the RPC function number. + * + * - The payload, where the RPC message stays. E.g. the params of a + * specific RPC function. Some RPC functions also have their headers + * in the payload. E.g. rm_alloc, rm_control. + * + * The memory layout of a GSP message element can be illustrated below:: + * + * +------------------------+ + * | Message Element Header | + * | (r535_gsp_msg) | + * | | + * | (r535_gsp_msg.data) | + * | | | + * |----------V-------------| + * | GSP RPC Header | + * | (nvfw_gsp_rpc) | + * | | + * | (nvfw_gsp_rpc.data) | + * | | | + * |----------V-------------| + * | Payload | + * | | + * | header(optional) | + * | params | + * +------------------------+ + * + * The max size of a message queue element is 16 pages (including the + * headers). When a GSP message to be sent is larger than 16 pages, the + * message should be split into multiple elements and sent accordingly. + * + * In the bunch of the split elements, the first element has the expected + * function number, while the rest of the elements are sent with the + * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD. + * + * GSP consumes the elements from the cmdq and always writes the result + * back to the msgq. The result is also formed as split elements. 
+ * + * Terminology: + * + * - gsp_msg(msg): GSP message element (element header + GSP RPC header + + * payload) + * - gsp_rpc(rpc): GSP RPC (RPC header + payload) + * - gsp_rpc_buf: buffer for (GSP RPC header + payload) + * - gsp_rpc_len: size of (GSP RPC header + payload) + * - params_size: size of params in the payload + * - payload_size: size of (header if exists + params) in the payload + */ + +struct r535_gsp_msg { + u8 auth_tag_buffer[16]; + u8 aad_buffer[16]; + u32 checksum; + u32 sequence; + u32 elem_count; + u32 pad; + u8 data[]; +}; + +struct nvfw_gsp_rpc { + u32 header_version; + u32 signature; + u32 length; + u32 function; + u32 rpc_result; + u32 rpc_result_private; + u32 sequence; + union { + u32 spare; + u32 cpuRmGfid; + }; + u8 data[]; +}; + +#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data) + +#define to_gsp_hdr(p, header) \ + container_of((void *)p, typeof(*header), data) + +#define to_payload_hdr(p, header) \ + container_of((void *)p, typeof(*header), params) + +int +r535_rpc_status_to_errno(uint32_t rpc_status) +{ + switch (rpc_status) { + case 0x55: /* NV_ERR_NOT_READY */ + case 0x66: /* NV_ERR_TIMEOUT_RETRY */ + return -EBUSY; + case 0x51: /* NV_ERR_NO_MEMORY */ + return -ENOMEM; + default: + return -EINVAL; + } +} + +static int +r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime) +{ + u32 size, rptr = *gsp->msgq.rptr; + int used; + + size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len, + GSP_PAGE_SIZE); + if (WARN_ON(!size || size >= gsp->msgq.cnt)) + return -EINVAL; + + do { + u32 wptr = *gsp->msgq.wptr; + + used = wptr + gsp->msgq.cnt - rptr; + if (used >= gsp->msgq.cnt) + used -= gsp->msgq.cnt; + if (used >= size) + break; + + usleep_range(1, 2); + } while (--(*ptime)); + + if (WARN_ON(!*ptime)) + return -ETIMEDOUT; + + return used; +} + +static struct r535_gsp_msg * +r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp) +{ + u32 rptr = *gsp->msgq.rptr; + + /* Skip the first page, which is the message queue info */ + return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE + + rptr * GSP_PAGE_SIZE); +} + +/** + * DOC: Receive a GSP message queue element + * + * Receiving a GSP message queue element from the message queue consists of + * the following steps: + * + * - Peek the element from the queue: r535_gsp_msgq_peek(). + * Peek the first page of the element to determine the total size of the + * message before allocating the proper memory. + * + * - Allocate memory for the message. + * Once the total size of the message is determined from the GSP message + * queue element, the caller of r535_gsp_msgq_recv() allocates the + * required memory. + * + * - Receive the message: r535_gsp_msgq_recv(). + * Copy the message into the allocated memory. Advance the read pointer. + * If the message is a large GSP message, r535_gsp_msgq_recv() calls + * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts + * until the complete message is received. + * r535_gsp_msgq_recv() assembles the payloads of cotinuation parts into + * the return of the large GSP message. + * + * - Free the allocated memory: r535_gsp_msg_done(). + * The user is responsible for freeing the memory allocated for the GSP + * message pages after they have been processed. 
+ */ +static void * +r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) +{ + struct r535_gsp_msg *mqe; + int ret; + + ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries); + if (ret < 0) + return ERR_PTR(ret); + + mqe = r535_gsp_msgq_get_entry(gsp); + + return mqe->data; +} + +struct r535_gsp_msg_info { + int *retries; + u32 gsp_rpc_len; + void *gsp_rpc_buf; + bool continuation; +}; + +static void +r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl); + +static void * +r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp, + struct r535_gsp_msg_info *info) +{ + u8 *buf = info->gsp_rpc_buf; + u32 rptr = *gsp->msgq.rptr; + struct r535_gsp_msg *mqe; + u32 size, expected, len; + int ret; + + expected = info->gsp_rpc_len; + + ret = r535_gsp_msgq_wait(gsp, expected, info->retries); + if (ret < 0) + return ERR_PTR(ret); + + mqe = r535_gsp_msgq_get_entry(gsp); + + if (info->continuation) { + struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data; + + if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) { + nvkm_error(&gsp->subdev, + "Not a continuation of a large RPC\n"); + r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); + return ERR_PTR(-EIO); + } + } + + size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE); + + len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); + len = min_t(u32, expected, len); + + if (info->continuation) + memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc), + len - sizeof(struct nvfw_gsp_rpc)); + else + memcpy(buf, mqe->data, len); + + expected -= len; + + if (expected) { + mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); + memcpy(buf + len, mqe, expected); + } + + rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt; + + mb(); + (*gsp->msgq.rptr) = rptr; + return buf; +} + +static void * +r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) +{ + struct r535_gsp_msg *mqe; + const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe); + struct nvfw_gsp_rpc *rpc; + struct r535_gsp_msg_info info = {0}; + u32 expected = gsp_rpc_len; + void *buf; + + mqe = r535_gsp_msgq_get_entry(gsp); + rpc = (struct nvfw_gsp_rpc *)mqe->data; + + if (WARN_ON(rpc->length > max_rpc_size)) + return NULL; + + buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + info.gsp_rpc_buf = buf; + info.retries = retries; + info.gsp_rpc_len = rpc->length; + + buf = r535_gsp_msgq_recv_one_elem(gsp, &info); + if (IS_ERR(buf)) { + kvfree(info.gsp_rpc_buf); + info.gsp_rpc_buf = NULL; + return buf; + } + + if (expected <= max_rpc_size) + return buf; + + info.gsp_rpc_buf += info.gsp_rpc_len; + expected -= info.gsp_rpc_len; + + while (expected) { + u32 size; + + rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries); + if (IS_ERR_OR_NULL(rpc)) { + kfree(buf); + return rpc; + } + + info.gsp_rpc_len = rpc->length; + info.continuation = true; + + rpc = r535_gsp_msgq_recv_one_elem(gsp, &info); + if (IS_ERR_OR_NULL(rpc)) { + kfree(buf); + return rpc; + } + + size = info.gsp_rpc_len - sizeof(*rpc); + expected -= size; + info.gsp_rpc_buf += size; + } + + rpc = buf; + rpc->length = gsp_rpc_len; + return buf; +} + +static int +r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc) +{ + struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); + struct r535_gsp_msg *cqe; + u32 gsp_rpc_len = msg->checksum; + u64 *ptr = (void *)msg; + u64 *end; + u64 csum = 0; + int free, time = 1000000; + u32 wptr, size, step, len; + u32 off = 0; + + len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, 
GSP_PAGE_SIZE); + + end = (u64 *)((char *)ptr + len); + msg->pad = 0; + msg->checksum = 0; + msg->sequence = gsp->cmdq.seq++; + msg->elem_count = DIV_ROUND_UP(len, 0x1000); + + while (ptr < end) + csum ^= *ptr++; + + msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); + + wptr = *gsp->cmdq.wptr; + do { + do { + free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; + if (free >= gsp->cmdq.cnt) + free -= gsp->cmdq.cnt; + if (free >= 1) + break; + + usleep_range(1, 2); + } while(--time); + + if (WARN_ON(!time)) { + kvfree(msg); + return -ETIMEDOUT; + } + + cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); + step = min_t(u32, free, (gsp->cmdq.cnt - wptr)); + size = min_t(u32, len, step * GSP_PAGE_SIZE); + + memcpy(cqe, (u8 *)msg + off, size); + + wptr += DIV_ROUND_UP(size, 0x1000); + if (wptr == gsp->cmdq.cnt) + wptr = 0; + + off += size; + len -= size; + } while (len); + + nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); + wmb(); + (*gsp->cmdq.wptr) = wptr; + mb(); + + nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); + + kvfree(msg); + return 0; +} + +static void * +r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len) +{ + struct r535_gsp_msg *msg; + u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len; + + size = ALIGN(size, GSP_MSG_MIN_SIZE); + msg = kvzalloc(size, GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + msg->checksum = gsp_rpc_len; + return msg->data; +} + +static void +r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) +{ + kvfree(msg); +} + +static void +r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) +{ + if (gsp->subdev.debug >= lvl) { + nvkm_printk__(&gsp->subdev, lvl, info, + "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", + msg->function, msg->length, msg->length - sizeof(*msg), + msg->rpc_result, msg->rpc_result_private); + print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, + msg->data, msg->length - sizeof(*msg), true); + } +} + +struct nvfw_gsp_rpc * +r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvfw_gsp_rpc *rpc; + int retries = 4000000, i; + +retry: + rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries); + if (IS_ERR_OR_NULL(rpc)) + return rpc; + + rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries); + if (IS_ERR_OR_NULL(rpc)) + return rpc; + + if (rpc->rpc_result) { + r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); + r535_gsp_msg_done(gsp, rpc); + return ERR_PTR(-EINVAL); + } + + r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE); + + if (fn && rpc->function == fn) { + if (gsp_rpc_len) { + if (rpc->length < gsp_rpc_len) { + nvkm_error(subdev, "rpc len %d < %d\n", + rpc->length, gsp_rpc_len); + r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR); + r535_gsp_msg_done(gsp, rpc); + return ERR_PTR(-EIO); + } + + return rpc; + } + + r535_gsp_msg_done(gsp, rpc); + return NULL; + } + + for (i = 0; i < gsp->msgq.ntfy_nr; i++) { + struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; + + if (ntfy->fn == rpc->function) { + if (ntfy->func) + ntfy->func(ntfy->priv, ntfy->fn, rpc->data, + rpc->length - sizeof(*rpc)); + break; + } + } + + if (i == gsp->msgq.ntfy_nr) + r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN); + + r535_gsp_msg_done(gsp, rpc); + if (fn) + goto retry; + + if (*gsp->msgq.rptr != *gsp->msgq.wptr) + goto retry; + + return NULL; +} + +int +r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) +{ + int ret = 0; + + mutex_lock(&gsp->msgq.mutex); + if (WARN_ON(gsp->msgq.ntfy_nr >= 
ARRAY_SIZE(gsp->msgq.ntfy))) { + ret = -ENOSPC; + } else { + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; + gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; + gsp->msgq.ntfy_nr++; + } + mutex_unlock(&gsp->msgq.mutex); + return ret; +} + +int +r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) +{ + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + repv = r535_gsp_msg_recv(gsp, fn, 0); + mutex_unlock(&gsp->cmdq.mutex); + if (IS_ERR(repv)) + return PTR_ERR(repv); + + return 0; +} + +static void * +r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, + enum nvkm_gsp_rpc_reply_policy policy, + u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *reply; + void *repv = NULL; + + switch (policy) { + case NVKM_GSP_RPC_REPLY_NOWAIT: + break; + case NVKM_GSP_RPC_REPLY_RECV: + reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); + if (!IS_ERR_OR_NULL(reply)) + repv = reply->data; + else + repv = reply; + break; + case NVKM_GSP_RPC_REPLY_POLL: + repv = r535_gsp_msg_recv(gsp, fn, 0); + break; + } + + return repv; +} + +static void * +r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); + u32 fn = rpc->function; + int ret; + + if (gsp->subdev.debug >= NV_DBG_TRACE) { + nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, + rpc->length, rpc->length - sizeof(*rpc)); + print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, + rpc->data, rpc->length - sizeof(*rpc), true); + } + + ret = r535_gsp_cmdq_push(gsp, rpc); + if (ret) + return ERR_PTR(ret); + + return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len); +} + +static void +r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) +{ + struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); + + r535_gsp_msg_done(gsp, rpc); +} + +static void * +r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size) +{ + struct nvfw_gsp_rpc *rpc; + + rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size, + sizeof(u64))); + if (IS_ERR(rpc)) + return ERR_CAST(rpc); + + rpc->header_version = 0x03000000; + rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; + rpc->function = fn; + rpc->rpc_result = 0xffffffff; + rpc->rpc_result_private = 0xffffffff; + rpc->length = sizeof(*rpc) + payload_size; + return rpc->data; +} + +static void * +r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); + struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); + const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg); + const u32 max_payload_size = max_rpc_size - sizeof(*rpc); + u32 payload_size = rpc->length - sizeof(*rpc); + void *repv; + + mutex_lock(&gsp->cmdq.mutex); + if (payload_size > max_payload_size) { + const u32 fn = rpc->function; + u32 remain_payload_size = payload_size; + + /* Adjust length, and send initial RPC. */ + rpc->length = sizeof(*rpc) + max_payload_size; + msg->checksum = rpc->length; + + repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0); + if (IS_ERR(repv)) + goto done; + + payload += max_payload_size; + remain_payload_size -= max_payload_size; + + /* Remaining chunks sent as CONTINUATION_RECORD RPCs. 
*/ + while (remain_payload_size) { + u32 size = min(remain_payload_size, + max_payload_size); + void *next; + + next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); + if (IS_ERR(next)) { + repv = next; + goto done; + } + + memcpy(next, payload, size); + + repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); + if (IS_ERR(repv)) + goto done; + + payload += size; + remain_payload_size -= size; + } + + /* Wait for reply. */ + repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size + + sizeof(*rpc)); + } else { + repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len); + } + +done: + mutex_unlock(&gsp->cmdq.mutex); + return repv; +} + +const struct nvkm_rm_api_rpc +r535_rpc = { + .get = r535_gsp_rpc_get, + .push = r535_gsp_rpc_push, + .done = r535_gsp_rpc_done, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h new file mode 100644 index 000000000000..7a0ece979167 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include +#ifndef __NVKM_RM_H__ +#define __NVKM_RM_H__ + +struct nvkm_rm_api { + const struct nvkm_rm_api_rpc { + void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc); + void *(*push)(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy, u32 repc); + void (*done)(struct nvkm_gsp *gsp, void *repv); + } *rpc; +}; + +extern const struct nvkm_rm_api r535_rm; +extern const struct nvkm_rm_api_rpc r535_rpc; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h new file mode 100644 index 000000000000..4431e33b3304 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVKM_RM_RPC_H__ +#define __NVKM_RM_RPC_H__ +#include "rm.h" + +#define to_payload_hdr(p, header) \ + container_of((void *)p, typeof(*header), params) + +int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn); + +struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len); +int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv); + +int r535_rpc_status_to_errno(uint32_t rpc_status); +#endif -- cgit v1.2.3 From 063d193f12b8c5c07cfe2e10972b2a687786284d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:36 +1000 Subject: drm/nouveau/gsp: split rm ctrl handling out on its own Split base RM_CONTROL handling out into its own module. Aside from moving the function pointers, no code change is intended. 
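As with the RPC split, only the plumbing moves: users of RM_CONTROL still go through the nvkm_gsp_rm_ctrl_*() wrappers, which now route via rm->api->ctrl. A sketch of the usual round trip from a caller's point of view; the command value and params struct are placeholders rather than a real RM control interface, and the calls assume the usual nvkm gsp headers:

  /* Hypothetical caller; only the get -> push -> done flow reflects the
   * code being moved here, everything named "example" is made up. */
  static int
  example_ctrl_call(struct nvkm_gsp_object *object)
  {
  	struct example_ctrl_params *ctrl;	/* placeholder params layout */
  	int ret;

  	ctrl = nvkm_gsp_rm_ctrl_get(object, EXAMPLE_CTRL_CMD, sizeof(*ctrl));
  	if (IS_ERR(ctrl))
  		return PTR_ERR(ctrl);

  	ctrl->request = 1;	/* fill in the request parameters */

  	/* Sends NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL; with repc != 0 the
  	 * reply parameters come back through the same pointer. */
  	ret = nvkm_gsp_rm_ctrl_push(object, &ctrl, sizeof(*ctrl));
  	if (ret) {
  		nvkm_gsp_rm_ctrl_done(object, ctrl);	/* tolerates NULL */
  		return ret;
  	}

  	/* ... read reply fields from *ctrl, then release the message. */
  	nvkm_gsp_rm_ctrl_done(object, ctrl);
  	return 0;
  }
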
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 10 +-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 66 +-------------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 1 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c | 94 ++++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 7 ++ 6 files changed, 107 insertions(+), 72 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index b543c31d3d32..bc2cf837aa9f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -212,10 +212,6 @@ struct nvkm_gsp { const struct nvkm_gsp_rm { const struct nvkm_rm_api *api; - void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc); - int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc); - void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv); - void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc); void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv); void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv); @@ -316,13 +312,13 @@ nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) static inline void * nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) { - return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc); + return object->client->gsp->rm->api->ctrl->get(object, cmd, argc); } static inline int nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc) { - return object->client->gsp->rm->rm_ctrl_push(object, argv, repc); + return object->client->gsp->rm->api->ctrl->push(object, argv, repc); } static inline void * @@ -353,7 +349,7 @@ nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv) static inline void nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) { - object->client->gsp->rm->rm_ctrl_done(object, repv); + object->client->gsp->rm->api->ctrl->done(object, repv); } static inline void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 745d43586bad..4797a92708e9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -20,6 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include + #include "priv.h" #include @@ -304,75 +305,10 @@ r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, return rpc->params; } -static void -r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params) -{ - rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc); - - if (!params) - return; - nvkm_gsp_rpc_done(object->client->gsp, rpc); -} - -static int -r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc) -{ - rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc); - struct nvkm_gsp *gsp = object->client->gsp; - int ret = 0; - - rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc); - if (IS_ERR_OR_NULL(rpc)) { - *params = NULL; - return PTR_ERR(rpc); - } - - if (rpc->status) { - ret = r535_rpc_status_to_errno(rpc->status); - if (ret != -EAGAIN && ret != -EBUSY) - nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", - object->client->object.handle, object->handle, rpc->cmd, rpc->status); - } - - if (repc) - *params = rpc->params; - else - nvkm_gsp_rpc_done(gsp, rpc); - - return ret; -} - -static void * -r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size) -{ - struct nvkm_gsp_client *client = object->client; - struct nvkm_gsp *gsp = client->gsp; - rpc_gsp_rm_control_v03_00 *rpc; - - nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n", - client->object.handle, object->handle, cmd, params_size); - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, - sizeof(*rpc) + params_size); - if (IS_ERR(rpc)) - return rpc; - - rpc->hClient = client->object.handle; - rpc->hObject = object->handle; - rpc->cmd = cmd; - rpc->status = 0; - rpc->paramsSize = params_size; - return rpc->params; -} - const struct nvkm_gsp_rm r535_gsp_rm = { .api = &r535_rm, - .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, - .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, - .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, - .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild index 21c818ec0701..c8d7419b754f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -4,3 +4,4 @@ nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o +nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c new file mode 100644 index 000000000000..f3f0fcd22cac --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c @@ -0,0 +1,94 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include + +static void +r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params) +{ + rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc); + + if (!params) + return; + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static int +r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc) +{ + rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc); + struct nvkm_gsp *gsp = object->client->gsp; + int ret = 0; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc); + if (IS_ERR_OR_NULL(rpc)) { + *params = NULL; + return PTR_ERR(rpc); + } + + if (rpc->status) { + ret = r535_rpc_status_to_errno(rpc->status); + if (ret != -EAGAIN && ret != -EBUSY) + nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", + object->client->object.handle, object->handle, rpc->cmd, rpc->status); + } + + if (repc) + *params = rpc->params; + else + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_control_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n", + client->object.handle, object->handle, cmd, params_size); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, + sizeof(*rpc) + params_size); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hObject = object->handle; + rpc->cmd = cmd; + rpc->status = 0; + rpc->paramsSize = params_size; + return rpc->params; +} + +const struct nvkm_rm_api_ctrl +r535_ctrl = { + .get = r535_gsp_rpc_rm_ctrl_get, + .push = r535_gsp_rpc_rm_ctrl_push, + .done = r535_gsp_rpc_rm_ctrl_done, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index f28b781abc5c..a3ee277a999d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -7,4 +7,5 @@ const struct nvkm_rm_api r535_rm = { .rpc = &r535_rpc, + .ctrl = &r535_ctrl, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 7a0ece979167..9558fbb59ae4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -13,8 +13,15 @@ struct nvkm_rm_api { enum nvkm_gsp_rpc_reply_policy policy, u32 repc); void (*done)(struct nvkm_gsp *gsp, void *repv); } *rpc; + + const struct nvkm_rm_api_ctrl { + void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size); + int (*push)(struct nvkm_gsp_object *, void **params, u32 repc); + void (*done)(struct nvkm_gsp_object *, void *params); + } *ctrl; }; extern const struct nvkm_rm_api r535_rm; extern const struct nvkm_rm_api_rpc r535_rpc; +extern const struct nvkm_rm_api_ctrl r535_ctrl; #endif -- cgit v1.2.3 From 
be33f49980b557f2d6367bff6a2573ebabf310c9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:36 +1000 Subject: drm/nouveau/gsp: split rm alloc handling out on its own Split base RM_ALLOC handling out into its own module. Aside from moving the function pointers, no code change is intended. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 19 ++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 85 ---------------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 1 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c | 113 +++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 9 ++ 6 files changed, 132 insertions(+), 96 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index bc2cf837aa9f..66e3873155f0 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -212,12 +212,6 @@ struct nvkm_gsp { const struct nvkm_gsp_rm { const struct nvkm_rm_api *api; - void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc); - void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv); - void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv); - - int (*rm_free)(struct nvkm_gsp_object *); - int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *); void (*client_dtor)(struct nvkm_gsp_client *); @@ -364,7 +358,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3 object->parent = parent; object->handle = handle; - argv = gsp->rm->rm_alloc_get(object, oclass, argc); + argv = gsp->rm->api->alloc->get(object, oclass, argc); if (IS_ERR_OR_NULL(argv)) { object->client = NULL; return argv; @@ -376,7 +370,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3 static inline void * nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv) { - void *repv = object->client->gsp->rm->rm_alloc_push(object, argv); + void *repv = object->client->gsp->rm->api->alloc->push(object, argv); if (IS_ERR(repv)) object->client = NULL; @@ -398,7 +392,7 @@ nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv) static inline void nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv) { - object->client->gsp->rm->rm_alloc_done(object, repv); + object->client->gsp->rm->api->alloc->done(object, repv); } static inline int @@ -416,8 +410,11 @@ nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 ar static inline int nvkm_gsp_rm_free(struct nvkm_gsp_object *object) { - if (object->client) - return object->client->gsp->rm->rm_free(object); + if (object->client) { + int ret = object->client->gsp->rm->api->alloc->free(object); + object->client = NULL; + return ret; + } return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 4797a92708e9..e9be8c2ef07e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -226,95 +226,10 @@ r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) return 0; } -static int -r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) -{ - struct nvkm_gsp_client *client = object->client; - struct nvkm_gsp 
*gsp = client->gsp; - rpc_free_v03_00 *rpc; - - nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", - client->object.handle, object->handle); - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); - if (WARN_ON(IS_ERR_OR_NULL(rpc))) - return -EIO; - - rpc->params.hRoot = client->object.handle; - rpc->params.hObjectParent = 0; - rpc->params.hObjectOld = object->handle; - return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); -} - -static void -r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params) -{ - rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); - - nvkm_gsp_rpc_done(object->client->gsp, rpc); -} - -static void * -r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params) -{ - rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); - struct nvkm_gsp *gsp = object->client->gsp; - void *ret = NULL; - - rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc)); - if (IS_ERR_OR_NULL(rpc)) - return rpc; - - if (rpc->status) { - ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); - if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY) - nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); - } - - nvkm_gsp_rpc_done(gsp, rpc); - - return ret; -} - -static void * -r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, - u32 params_size) -{ - struct nvkm_gsp_client *client = object->client; - struct nvkm_gsp *gsp = client->gsp; - rpc_gsp_rm_alloc_v03_00 *rpc; - - nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n", - client->object.handle, object->parent->handle, - object->handle); - - nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass, - params_size); - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, - sizeof(*rpc) + params_size); - if (IS_ERR(rpc)) - return rpc; - - rpc->hClient = client->object.handle; - rpc->hParent = object->parent->handle; - rpc->hObject = object->handle; - rpc->hClass = oclass; - rpc->status = 0; - rpc->paramsSize = params_size; - return rpc->params; -} - const struct nvkm_gsp_rm r535_gsp_rm = { .api = &r535_rm, - .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, - .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, - .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, - - .rm_free = r535_gsp_rpc_rm_free, - .client_ctor = r535_gsp_client_ctor, .client_dtor = r535_gsp_client_dtor, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild index c8d7419b754f..48b432c9005d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o +nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c new file mode 100644 index 000000000000..968fb7e01b46 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c @@ -0,0 +1,113 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include + +static int +r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_free_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n", + client->object.handle, object->handle); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->params.hRoot = client->object.handle; + rpc->params.hObjectParent = 0; + rpc->params.hObjectOld = object->handle; + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); +} + +static void +r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); + + nvkm_gsp_rpc_done(object->client->gsp, rpc); +} + +static void * +r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params) +{ + rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc); + struct nvkm_gsp *gsp = object->client->gsp; + void *ret = NULL; + + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc)); + if (IS_ERR_OR_NULL(rpc)) + return rpc; + + if (rpc->status) { + ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status)); + if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY) + nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status); + } + + nvkm_gsp_rpc_done(gsp, rpc); + + return ret; +} + +static void * +r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, + u32 params_size) +{ + struct nvkm_gsp_client *client = object->client; + struct nvkm_gsp *gsp = client->gsp; + rpc_gsp_rm_alloc_v03_00 *rpc; + + nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n", + client->object.handle, object->parent->handle, + object->handle); + + nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass, + params_size); + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, + sizeof(*rpc) + params_size); + if (IS_ERR(rpc)) + return rpc; + + rpc->hClient = client->object.handle; + rpc->hParent = object->parent->handle; + rpc->hObject = object->handle; + rpc->hClass = oclass; + rpc->status = 0; + rpc->paramsSize = params_size; + return rpc->params; +} + +const struct nvkm_rm_api_alloc +r535_alloc = { + .get = r535_gsp_rpc_rm_alloc_get, + .push = r535_gsp_rpc_rm_alloc_push, + .done = r535_gsp_rpc_rm_alloc_done, + .free = r535_gsp_rpc_rm_free, +}; diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index a3ee277a999d..f6fcd89ec502 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -8,4 +8,5 @@ const struct nvkm_rm_api r535_rm = { .rpc = &r535_rpc, .ctrl = &r535_ctrl, + .alloc = &r535_alloc, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 9558fbb59ae4..6d0eb8e202ca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -19,9 +19,18 @@ struct nvkm_rm_api { int (*push)(struct nvkm_gsp_object *, void **params, u32 repc); void (*done)(struct nvkm_gsp_object *, void *params); } *ctrl; + + const struct nvkm_rm_api_alloc { + void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size); + void *(*push)(struct nvkm_gsp_object *, void *params); + void (*done)(struct nvkm_gsp_object *, void *params); + + int (*free)(struct nvkm_gsp_object *); + } *alloc; }; extern const struct nvkm_rm_api r535_rm; extern const struct nvkm_rm_api_rpc r535_rpc; extern const struct nvkm_rm_api_ctrl r535_ctrl; +extern const struct nvkm_rm_api_alloc r535_alloc; #endif -- cgit v1.2.3 From 45a78c640510af520c26737d10ad5eacb3d72841 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: split client handling out on its own Split NV01_ROOT handling out into its own module. Aside from moving the function pointers, no code change is intended. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 7 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 53 -------------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 1 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c | 80 ++++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 6 ++ 6 files changed, 90 insertions(+), 58 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 66e3873155f0..2c3d8fd04516 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -212,9 +212,6 @@ struct nvkm_gsp { const struct nvkm_gsp_rm { const struct nvkm_rm_api *api; - int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *); - void (*client_dtor)(struct nvkm_gsp_client *); - int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *); void (*device_dtor)(struct nvkm_gsp_device *); @@ -425,14 +422,14 @@ nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) if (WARN_ON(!gsp->rm)) return -ENOSYS; - return gsp->rm->client_ctor(gsp, client); + return gsp->rm->api->client->ctor(gsp, client); } static inline void nvkm_gsp_client_dtor(struct nvkm_gsp_client *client) { if (client->gsp) - client->gsp->rm->client_dtor(client); + client->gsp->rm->api->client->dtor(client); } static inline int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index e9be8c2ef07e..c6fce9541c0e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -33,7 +33,6 @@ #include #include 
-#include #include #include #include @@ -177,62 +176,10 @@ r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *dev return ret; } -static void -r535_gsp_client_dtor(struct nvkm_gsp_client *client) -{ - struct nvkm_gsp *gsp = client->gsp; - - nvkm_gsp_rm_free(&client->object); - - mutex_lock(&gsp->client_id.mutex); - idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); - mutex_unlock(&gsp->client_id.mutex); - - client->gsp = NULL; -} - -static int -r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) -{ - NV0000_ALLOC_PARAMETERS *args; - int ret; - - mutex_lock(&gsp->client_id.mutex); - ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); - mutex_unlock(&gsp->client_id.mutex); - if (ret < 0) - return ret; - - client->gsp = gsp; - client->object.client = client; - INIT_LIST_HEAD(&client->events); - - args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), - &client->object); - if (IS_ERR(args)) { - r535_gsp_client_dtor(client); - return ret; - } - - args->hClient = client->object.handle; - args->processID = ~0; - - ret = nvkm_gsp_rm_alloc_wr(&client->object, args); - if (ret) { - r535_gsp_client_dtor(client); - return ret; - } - - return 0; -} - const struct nvkm_gsp_rm r535_gsp_rm = { .api = &r535_rm, - .client_ctor = r535_gsp_client_ctor, - .client_dtor = r535_gsp_client_dtor, - .device_ctor = r535_gsp_device_ctor, .device_dtor = r535_gsp_device_dtor, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild index 48b432c9005d..f25b438fa3d9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -6,3 +6,4 @@ nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o +nvkm-y += nvkm/subdev/gsp/rm/r535/client.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c new file mode 100644 index 000000000000..7a2da37af283 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c @@ -0,0 +1,80 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include + +#include +#include + +static void +r535_gsp_client_dtor(struct nvkm_gsp_client *client) +{ + struct nvkm_gsp *gsp = client->gsp; + + nvkm_gsp_rm_free(&client->object); + + mutex_lock(&gsp->client_id.mutex); + idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); + mutex_unlock(&gsp->client_id.mutex); + + client->gsp = NULL; +} + +static int +r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) +{ + NV0000_ALLOC_PARAMETERS *args; + int ret; + + mutex_lock(&gsp->client_id.mutex); + ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); + mutex_unlock(&gsp->client_id.mutex); + if (ret < 0) + return ret; + + client->gsp = gsp; + client->object.client = client; + INIT_LIST_HEAD(&client->events); + + args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), + &client->object); + if (IS_ERR(args)) { + r535_gsp_client_dtor(client); + return ret; + } + + args->hClient = client->object.handle; + args->processID = ~0; + + ret = nvkm_gsp_rm_alloc_wr(&client->object, args); + if (ret) { + r535_gsp_client_dtor(client); + return ret; + } + + return 0; +} + +const struct nvkm_rm_api_client +r535_client = { + .ctor = r535_gsp_client_ctor, + .dtor = r535_gsp_client_dtor, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index f6fcd89ec502..dba1d2058b37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -9,4 +9,5 @@ r535_rm = { .rpc = &r535_rpc, .ctrl = &r535_ctrl, .alloc = &r535_alloc, + .client = &r535_client, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 6d0eb8e202ca..c3341631fbd5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -27,10 +27,16 @@ struct nvkm_rm_api { int (*free)(struct nvkm_gsp_object *); } *alloc; + + const struct nvkm_rm_api_client { + int (*ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *); + void (*dtor)(struct nvkm_gsp_client *); + } *client; }; extern const struct nvkm_rm_api r535_rm; extern const struct nvkm_rm_api_rpc r535_rpc; extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; +extern const struct nvkm_rm_api_client r535_client; #endif -- cgit v1.2.3 From f964336483159da1f4ca67d7abef3d82e725ed36 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: split device handling out on its own Split handling of NV01_DEVICE (and other related objects) out into its own module. Aside from moving the function pointers, no code change is intended. 
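As a sketch of how the relocated device API is consumed through the gsp.h wrappers — the event handle value and the caller-owned storage are illustrative assumptions, not code from this series:

/* Sketch only: construct a device and register an event through the new
 * rm->api->device vtable.  The handle 0xd0000001 and the caller-owned
 * 'device'/'event' storage are assumptions for illustration. */
static int
example_device_event(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device,
		     u32 id, nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
	int ret;

	ret = nvkm_gsp_device_ctor(client, device);	/* api->device->ctor */
	if (ret)
		return ret;

	/* api->device->event.ctor */
	ret = nvkm_gsp_device_event_ctor(device, 0xd0000001, id, func, event);
	if (ret)
		nvkm_gsp_device_dtor(device);		/* api->device->dtor */

	return ret;
}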
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 17 +-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 127 ----------------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 1 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c | 153 +++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 12 ++ 6 files changed, 173 insertions(+), 138 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 2c3d8fd04516..3fd279be8340 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -211,13 +211,6 @@ struct nvkm_gsp { const struct nvkm_gsp_rm { const struct nvkm_rm_api *api; - - int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *); - void (*device_dtor)(struct nvkm_gsp_device *); - - int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id, - nvkm_gsp_event_func, struct nvkm_gsp_event *); - void (*event_dtor)(struct nvkm_gsp_event *); } *rm; struct { @@ -435,14 +428,14 @@ nvkm_gsp_client_dtor(struct nvkm_gsp_client *client) static inline int nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) { - return client->gsp->rm->device_ctor(client, device); + return client->gsp->rm->api->device->ctor(client, device); } static inline void nvkm_gsp_device_dtor(struct nvkm_gsp_device *device) { if (device->object.client) - device->object.client->gsp->rm->device_dtor(device); + device->object.client->gsp->rm->api->device->dtor(device); } static inline int @@ -474,7 +467,9 @@ static inline int nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, nvkm_gsp_event_func func, struct nvkm_gsp_event *event) { - return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event); + const struct nvkm_gsp_rm *rm = device->object.client->gsp->rm; + + return rm->api->device->event.ctor(device, handle, id, func, event); } static inline void @@ -483,7 +478,7 @@ nvkm_gsp_event_dtor(struct nvkm_gsp_event *event) struct nvkm_gsp_device *device = event->device; if (device) - device->object.client->gsp->rm->event_dtor(event); + device->object.client->gsp->rm->api->device->event.dtor(event); } int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index c6fce9541c0e..c292f5a4e1ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -33,20 +33,14 @@ #include #include -#include -#include -#include -#include #include #include -#include #include #include #include #include #include #include -#include #include #include #include @@ -61,130 +55,9 @@ extern struct dentry *nouveau_debugfs_root; -static void -r535_gsp_event_dtor(struct nvkm_gsp_event *event) -{ - struct nvkm_gsp_device *device = event->device; - struct nvkm_gsp_client *client = device->object.client; - struct nvkm_gsp *gsp = client->gsp; - - mutex_lock(&gsp->client_id.mutex); - if (event->func) { - list_del(&event->head); - event->func = NULL; - } - mutex_unlock(&gsp->client_id.mutex); - - nvkm_gsp_rm_free(&event->object); - event->device = NULL; -} - -static int 
-r535_gsp_device_event_get(struct nvkm_gsp_event *event) -{ - struct nvkm_gsp_device *device = event->device; - NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, - NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->event = event->id; - ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; - return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); -} - -static int -r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, - nvkm_gsp_event_func func, struct nvkm_gsp_event *event) -{ - struct nvkm_gsp_client *client = device->object.client; - struct nvkm_gsp *gsp = client->gsp; - NV0005_ALLOC_PARAMETERS *args; - int ret; - - args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, - NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), - &event->object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->hParentClient = client->object.handle; - args->hSrcResource = 0; - args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; - args->notifyIndex = NV01_EVENT_CLIENT_RM | id; - args->data = NULL; - - ret = nvkm_gsp_rm_alloc_wr(&event->object, args); - if (ret) - return ret; - - event->device = device; - event->id = id; - - ret = r535_gsp_device_event_get(event); - if (ret) { - nvkm_gsp_event_dtor(event); - return ret; - } - - mutex_lock(&gsp->client_id.mutex); - event->func = func; - list_add(&event->head, &client->events); - mutex_unlock(&gsp->client_id.mutex); - return 0; -} - -static void -r535_gsp_device_dtor(struct nvkm_gsp_device *device) -{ - nvkm_gsp_rm_free(&device->subdevice); - nvkm_gsp_rm_free(&device->object); -} - -static int -r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) -{ - NV2080_ALLOC_PARAMETERS *args; - - return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), - &device->subdevice); -} - -static int -r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) -{ - NV0080_ALLOC_PARAMETERS *args; - int ret; - - args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), - &device->object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->hClientShare = client->object.handle; - - ret = nvkm_gsp_rm_alloc_wr(&device->object, args); - if (ret) - return ret; - - ret = r535_gsp_subdevice_ctor(device); - if (ret) - nvkm_gsp_rm_free(&device->object); - - return ret; -} - const struct nvkm_gsp_rm r535_gsp_rm = { .api = &r535_rm, - - .device_ctor = r535_gsp_device_ctor, - .device_dtor = r535_gsp_device_dtor, - - .event_ctor = r535_gsp_device_event_ctor, - .event_dtor = r535_gsp_event_dtor, }; static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild index f25b438fa3d9..d50f2c351d93 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -7,3 +7,4 @@ nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o nvkm-y += nvkm/subdev/gsp/rm/r535/client.o +nvkm-y += nvkm/subdev/gsp/rm/r535/device.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c new file mode 100644 index 000000000000..09173ca1c050 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c @@ -0,0 +1,153 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include +#include +#include +#include +#include + +static void +r535_gsp_event_dtor(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + + mutex_lock(&gsp->client_id.mutex); + if (event->func) { + list_del(&event->head); + event->func = NULL; + } + mutex_unlock(&gsp->client_id.mutex); + + nvkm_gsp_rm_free(&event->object); + event->device = NULL; +} + +static int +r535_gsp_device_event_get(struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_device *device = event->device; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->event = event->id; + ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); +} + +static int +r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, + nvkm_gsp_event_func func, struct nvkm_gsp_event *event) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + NV0005_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, + NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), + &event->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hParentClient = client->object.handle; + args->hSrcResource = 0; + args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + args->notifyIndex = NV01_EVENT_CLIENT_RM | id; + args->data = NULL; + + ret = nvkm_gsp_rm_alloc_wr(&event->object, args); + if (ret) + return ret; + + event->device = device; + event->id = id; + + ret = r535_gsp_device_event_get(event); + if (ret) { + nvkm_gsp_event_dtor(event); + return ret; + } + + mutex_lock(&gsp->client_id.mutex); + event->func = func; + list_add(&event->head, &client->events); + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +static void +r535_gsp_device_dtor(struct nvkm_gsp_device *device) +{ + nvkm_gsp_rm_free(&device->subdevice); + nvkm_gsp_rm_free(&device->object); +} + +static int +r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) +{ + NV2080_ALLOC_PARAMETERS *args; + + return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), + &device->subdevice); +} + +static int 
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) +{ + NV0080_ALLOC_PARAMETERS *args; + int ret; + + args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), + &device->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hClientShare = client->object.handle; + + ret = nvkm_gsp_rm_alloc_wr(&device->object, args); + if (ret) + return ret; + + ret = r535_gsp_subdevice_ctor(device); + if (ret) + nvkm_gsp_rm_free(&device->object); + + return ret; +} + +const struct nvkm_rm_api_device +r535_device = { + .ctor = r535_gsp_device_ctor, + .dtor = r535_gsp_device_dtor, + .event.ctor = r535_gsp_device_event_ctor, + .event.dtor = r535_gsp_event_dtor, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index dba1d2058b37..39cc3d0c740c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -10,4 +10,5 @@ r535_rm = { .ctrl = &r535_ctrl, .alloc = &r535_alloc, .client = &r535_client, + .device = &r535_device, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index c3341631fbd5..20841305fa55 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -32,6 +32,17 @@ struct nvkm_rm_api { int (*ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *); void (*dtor)(struct nvkm_gsp_client *); } *client; + + const struct nvkm_rm_api_device { + int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *); + void (*dtor)(struct nvkm_gsp_device *); + + struct { + int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id, + nvkm_gsp_event_func, struct nvkm_gsp_event *); + void (*dtor)(struct nvkm_gsp_event *); + } event; + } *device; }; extern const struct nvkm_rm_api r535_rm; @@ -39,4 +50,5 @@ extern const struct nvkm_rm_api_rpc r535_rpc; extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; +extern const struct nvkm_rm_api_device r535_device; #endif -- cgit v1.2.3 From 7f022236b55be48b46e9a2041b2fbca6acf1a71b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Feb 2025 08:38:19 +1000 Subject: drm/nouveau/gsp: move firmware loading to GPU-specific code GH100/GBxxx use a slightly different set of firmwares to boot GSP-RM. 
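To show the shape this gives chip-specific code, here is a sketch of a per-chip loader built on the helpers this patch moves into base.c; the function name and the reduced firmware set are hypothetical, not taken from the series:

/* Sketch only: a hypothetical chip-specific loader reusing the new
 * helpers.  nvkm_gsp_load_fw() resolves "gsp/<name>-<ver>" under the
 * chip's firmware directory; nvkm_gsp_dtor_fws() releases anything
 * already fetched on failure. */
static int
example_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
	int ret;

	ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
	if (ret)
		goto done;

	ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);

done:
	if (ret)
		nvkm_gsp_dtor_fws(gsp);
	return ret;
}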
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 8 ++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 23 ++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 4 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 8 ++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 12 +++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 78 +------------------------ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 52 ++++++++++++++++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 5 +- 8 files changed, 108 insertions(+), 82 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index c849c6299c52..221ea0fd8a51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -45,7 +45,7 @@ ad102_gsp_r535_113_01 = { static struct nvkm_gsp_fwif ad102_gsps[] = { - { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true }, + { 0, tu102_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true }, {} }; @@ -55,3 +55,9 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index da1bebb896f7..fe1cef1b6324 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -80,6 +80,19 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev) return gsp->func->oneinit(gsp); } +void +nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp) +{ + nvkm_firmware_put(gsp->fws.bl); + gsp->fws.bl = NULL; + nvkm_firmware_put(gsp->fws.booter.unload); + gsp->fws.booter.unload = NULL; + nvkm_firmware_put(gsp->fws.booter.load); + gsp->fws.booter.load = NULL; + nvkm_firmware_put(gsp->fws.rm); + gsp->fws.rm = NULL; +} + static void * nvkm_gsp_dtor(struct nvkm_subdev *subdev) { @@ -100,6 +113,16 @@ nvkm_gsp = { .fini = nvkm_gsp_fini, }; +int +nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, + const struct firmware **pfw) +{ + char fwname[64]; + + snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); + return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); +} + int nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c index 223f68b532ef..c3be72d3390a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -61,7 +61,7 @@ ga100_gsp_r535_113_01 = { static struct nvkm_gsp_fwif ga100_gsps[] = { - { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; @@ -72,3 +72,5 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index 4c4b4168a266..d7385d3989f1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -178,7 +178,7 @@ ga102_gsp = { static struct nvkm_gsp_fwif ga102_gsps[] = { - { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" }, { -1, gv100_gsp_nofw, &ga102_gsp }, {} }; @@ -189,3 +189,9 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 9f4a62375a27..601dd5d503bb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -15,8 +15,18 @@ struct nvkm_gsp_fwif { bool enable; }; +int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver, + const struct firmware **); +void nvkm_gsp_dtor_fws(struct nvkm_gsp *); + int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); -int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); +int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + +#define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin") struct nvkm_gsp_func { const struct nvkm_falcon_func *flcn; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index c292f5a4e1ec..9d5b77b3cb2f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1960,19 +1960,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u return -ENOENT; } -static void -r535_gsp_dtor_fws(struct nvkm_gsp *gsp) -{ - nvkm_firmware_put(gsp->fws.bl); - gsp->fws.bl = NULL; - nvkm_firmware_put(gsp->fws.booter.unload); - gsp->fws.booter.unload = NULL; - nvkm_firmware_put(gsp->fws.booter.load); - gsp->fws.booter.load = NULL; - nvkm_firmware_put(gsp->fws.rm); - gsp->fws.rm = NULL; -} - #ifdef CONFIG_DEBUG_FS struct r535_gsp_log { @@ -2209,7 +2196,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) mutex_destroy(&gsp->msgq.mutex); mutex_destroy(&gsp->cmdq.mutex); - r535_gsp_dtor_fws(gsp); + nvkm_gsp_dtor_fws(gsp); nvkm_gsp_mem_dtor(&gsp->rmargs); nvkm_gsp_mem_dtor(&gsp->wpr_meta); @@ -2284,7 +2271,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) return ret; /* Release FW images - we've copied them to DMA buffers now. */ - r535_gsp_dtor_fws(gsp); + nvkm_gsp_dtor_fws(gsp); /* Calculate FB layout. 
*/ gsp->fb.wpr2.frts.size = 0x100000; @@ -2349,64 +2336,3 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) idr_init(&gsp->client_id.idr); return 0; } - -static int -r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver, - const struct firmware **pfw) -{ - char fwname[64]; - - snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver); - return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw); -} - -int -r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) -{ - struct nvkm_subdev *subdev = &gsp->subdev; - int ret; - bool enable_gsp = fwif->enable; - -#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) - enable_gsp = true; -#endif - if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) - return -EINVAL; - - if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || - (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) || - (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) || - (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) { - r535_gsp_dtor_fws(gsp); - return ret; - } - - return 0; -} - -#define NVKM_GSP_FIRMWARE(chip) \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \ -MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin") - -NVKM_GSP_FIRMWARE(tu102); -NVKM_GSP_FIRMWARE(tu104); -NVKM_GSP_FIRMWARE(tu106); - -NVKM_GSP_FIRMWARE(tu116); -NVKM_GSP_FIRMWARE(tu117); - -NVKM_GSP_FIRMWARE(ga100); - -NVKM_GSP_FIRMWARE(ga102); -NVKM_GSP_FIRMWARE(ga103); -NVKM_GSP_FIRMWARE(ga104); -NVKM_GSP_FIRMWARE(ga106); -NVKM_GSP_FIRMWARE(ga107); - -NVKM_GSP_FIRMWARE(ad102); -NVKM_GSP_FIRMWARE(ad103); -NVKM_GSP_FIRMWARE(ad104); -NVKM_GSP_FIRMWARE(ad106); -NVKM_GSP_FIRMWARE(ad107); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index 59c5f2b9172a..e7396344cfdf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -183,9 +183,55 @@ tu102_gsp_r535_113_01 = { .rm = &r535_gsp_rm, }; +static int +tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + bool enable_gsp = fwif->enable; + int ret; + +#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) + enable_gsp = true; +#endif + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) + return -EINVAL; + + ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm); + if (ret) + return ret; + + ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl); + if (ret) + return ret; + + return 0; +} + +int +tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) +{ + int ret; + + ret = tu102_gsp_load_rm(gsp, fwif); + if (ret) + goto done; + + ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load); + if (ret) + goto done; + + ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload); + +done: + if (ret) + nvkm_gsp_dtor_fws(gsp); + + return ret; +} + static struct nvkm_gsp_fwif tu102_gsps[] = { - { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; @@ -196,3 +242,7 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp); 
} + +NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c index 04fbd9ed28b1..a8c9480b8024 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -44,7 +44,7 @@ tu116_gsp_r535_113_01 = { static struct nvkm_gsp_fwif tu116_gsps[] = { - { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; @@ -55,3 +55,6 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, { return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp); } + +NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01); -- cgit v1.2.3 From 594766ca3e5398b861cdd71687db82e483f67edb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 22 Jan 2025 21:21:03 +1000 Subject: drm/nouveau/gsp: move booter handling to GPU-specific code GH100/GBxxx have significant changes to the GSP-RM boot process. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 4 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 4 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 4 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 94 +------------------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 113 +++++++++++++++++++++++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 4 +- 8 files changed, 125 insertions(+), 103 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index 221ea0fd8a51..ea2821e7a54e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -36,8 +36,8 @@ ad102_gsp_r535_113_01 = { .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = ga102_gsp_reset, .rm = &r535_gsp_rm, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index fe1cef1b6324..78f2a15f0d42 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_gsp *gsp = nvkm_gsp(subdev); - if (!gsp->func->fini) + if (!gsp->func->fini || !gsp->running) return 0; return gsp->func->fini(gsp, suspend); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c index c3be72d3390a..d9cdec4810b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -52,8 +52,8 @@ ga100_gsp_r535_113_01 = { .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, .rm = &r535_gsp_rm, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index d7385d3989f1..7b8db70f3cb3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -164,8 +164,8 @@ ga102_gsp_r535_113_01 = { .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = ga102_gsp_reset, .rm = &r535_gsp_rm, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 601dd5d503bb..e6f0e865848a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -59,6 +59,8 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec; int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *, struct nvkm_falcon *, struct nvkm_falcon_fw *); int tu102_gsp_oneinit(struct nvkm_gsp *); +int tu102_gsp_init(struct nvkm_gsp *); +int tu102_gsp_fini(struct nvkm_gsp *, bool suspend); int tu102_gsp_reset(struct nvkm_gsp *); extern const struct nvkm_falcon_func ga102_gsp_flcn; @@ -72,6 +74,7 @@ int r535_gsp_oneinit(struct nvkm_gsp *); int r535_gsp_init(struct nvkm_gsp *); int r535_gsp_fini(struct nvkm_gsp *, bool suspend); extern const struct nvkm_gsp_rm r535_gsp_rm; +int r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume); int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 9d5b77b3cb2f..f42879b2ea7e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1145,48 +1145,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) return 0; } -static int -r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) -{ - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvkm_device *device = subdev->device; - u32 wpr2_hi; - int ret; - - wpr2_hi = nvkm_rd32(device, 0x1fa828); - if (!wpr2_hi) { - nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); - return 0; - } - - ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); - if (WARN_ON(ret)) - return ret; - - wpr2_hi = nvkm_rd32(device, 0x1fa828); - if (WARN_ON(wpr2_hi)) - return -EIO; - - return 0; -} - -static int -r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) -{ - int ret; - - ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); - if (ret) - return ret; - - nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); - - if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) - return -EIO; - - return 0; -} - static int r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) { @@ -1287,7 +1245,7 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp) return 0; } -static int +int r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) { GSP_ARGUMENTS_CACHED *args; @@ -1816,12 +1774,8 @@ lvl1_fail: int r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) { - u32 mbox0 = 0xff, mbox1 = 0xff; int ret; - if (!gsp->running) - return 0; - if (suspend) { GspFwWprMeta *meta = gsp->wpr_meta.data; u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; @@ -1844,9 +1798,6 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) sr->revision = GSP_FW_SR_META_REVISION; sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; sr->sizeOfSuspendResumeData = len; - - mbox0 = lower_32_bits(gsp->sr.meta.addr); - mbox1 = upper_32_bits(gsp->sr.meta.addr); } ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); @@ -1858,14 
+1809,6 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) break; ); - nvkm_falcon_reset(&gsp->falcon); - - ret = nvkm_gsp_fwsec_sb(gsp); - WARN_ON(ret); - - ret = r535_gsp_booter_unload(gsp, mbox0, mbox1); - WARN_ON(ret); - gsp->running = false; return 0; } @@ -1873,23 +1816,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) int r535_gsp_init(struct nvkm_gsp *gsp) { - u32 mbox0, mbox1; int ret; - if (!gsp->sr.meta.data) { - mbox0 = lower_32_bits(gsp->wpr_meta.addr); - mbox1 = upper_32_bits(gsp->wpr_meta.addr); - } else { - r535_gsp_rmargs_init(gsp, true); - - mbox0 = lower_32_bits(gsp->sr.meta.addr); - mbox1 = upper_32_bits(gsp->sr.meta.addr); - } + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); - /* Execute booter to handle (eventually...) booting GSP-RM. */ - ret = r535_gsp_booter_load(gsp, mbox0, mbox1); - if (WARN_ON(ret)) - goto done; + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); if (ret) @@ -2220,16 +2152,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) mutex_init(&gsp->cmdq.mutex); mutex_init(&gsp->msgq.mutex); - ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, - &device->sec2->falcon, &gsp->booter.load); - if (ret) - return ret; - - ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, - &device->sec2->falcon, &gsp->booter.unload); - if (ret) - return ret; - /* Load GSP firmware from ELF image into DMA-accessible memory. */ ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); if (ret) @@ -2324,14 +2246,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) if (WARN_ON(ret)) return ret; - /* Reset GSP into RISC-V mode. */ - ret = gsp->func->reset(gsp); - if (WARN_ON(ret)) - return ret; - - nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); - nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); - mutex_init(&gsp->client_id.mutex); idr_init(&gsp->client_id.idr); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index e7396344cfdf..451d3e588d26 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -22,11 +22,43 @@ #include "priv.h" #include +#include #include #include #include +static int +tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + u32 wpr2_hi; + int ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (!wpr2_hi) { + nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); + return 0; + } + + ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); + if (WARN_ON(ret)) + return ret; + + wpr2_hi = nvkm_rd32(device, 0x1fa828); + if (WARN_ON(wpr2_hi)) + return -EIO; + + return 0; +} + +static int +tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) +{ + return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); +} + int tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob, struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) @@ -114,6 +146,55 @@ tu102_gsp_reset(struct nvkm_gsp *gsp) return gsp->falcon.func->reset_eng(&gsp->falcon); } +int +tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + u32 mbox0 = 0xff, mbox1 = 0xff; + int ret; + + ret = r535_gsp_fini(gsp, suspend); + if (ret && suspend) + return ret; + + 
nvkm_falcon_reset(&gsp->falcon); + + ret = nvkm_gsp_fwsec_sb(gsp); + WARN_ON(ret); + + if (suspend) { + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + ret = tu102_gsp_booter_unload(gsp, mbox0, mbox1); + WARN_ON(ret); + return 0; +} + +int +tu102_gsp_init(struct nvkm_gsp *gsp) +{ + u32 mbox0, mbox1; + int ret; + + if (!gsp->sr.meta.data) { + mbox0 = lower_32_bits(gsp->wpr_meta.addr); + mbox1 = upper_32_bits(gsp->wpr_meta.addr); + } else { + r535_gsp_rmargs_init(gsp, true); + + mbox0 = lower_32_bits(gsp->sr.meta.addr); + mbox1 = upper_32_bits(gsp->sr.meta.addr); + } + + /* Execute booter to handle (eventually...) booting GSP-RM. */ + ret = tu102_gsp_booter_load(gsp, mbox0, mbox1); + if (WARN_ON(ret)) + return ret; + + return r535_gsp_init(gsp); +} + static u64 tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size) { @@ -136,14 +217,38 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size) int tu102_gsp_oneinit(struct nvkm_gsp *gsp) { - gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device); + struct nvkm_device *device = gsp->subdev.device; + int ret; + + gsp->fb.size = nvkm_fb_vidmem_size(device); gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size); gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr; gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr; gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size; - return r535_gsp_oneinit(gsp); + ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load, + &device->sec2->falcon, &gsp->booter.load); + if (ret) + return ret; + + ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload, + &device->sec2->falcon, &gsp->booter.unload); + if (ret) + return ret; + + ret = r535_gsp_oneinit(gsp); + if (ret) + return ret; + + /* Reset GSP into RISC-V mode. */ + ret = gsp->func->reset(gsp); + if (ret) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + return 0; } const struct nvkm_falcon_func @@ -176,8 +281,8 @@ tu102_gsp_r535_113_01 = { .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, .rm = &r535_gsp_rm, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c index a8c9480b8024..1bc806a18010 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -35,8 +35,8 @@ tu116_gsp_r535_113_01 = { .dtor = r535_gsp_dtor, .oneinit = tu102_gsp_oneinit, - .init = r535_gsp_init, - .fini = r535_gsp_fini, + .init = tu102_gsp_init, + .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, .rm = &r535_gsp_rm, -- cgit v1.2.3 From c472d828348cafa7874816f4ad2a2314d0000419 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: move subdev/engine impls to subdev/gsp/rm/r535/ Move all the remaining GSP-RM code together underneath a versioned path, to make the code easier to work with when adding support for a newer RM version. Aside from adjusting include paths, no code change is intended. 
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c | 108 - drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c | 1725 --------------- drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c | 550 ----- drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c | 508 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c | 110 - drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c | 110 - drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c | 107 - drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c | 107 - drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c | 185 -- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2252 -------------------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild | 15 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c | 185 ++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c | 108 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 1725 +++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c | 333 +++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 550 +++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 508 +++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 2252 ++++++++++++++++++++ .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c | 110 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c | 110 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c | 107 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c | 107 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 123 ++ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c | 333 --- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c | 123 -- 37 files changed, 6233 insertions(+), 6242 deletions(-) delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c create mode 100644 
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild index 165d61fc5d6c..8bf1635ffabc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild @@ -10,5 +10,3 @@ nvkm-y += nvkm/engine/ce/gv100.o nvkm-y += nvkm/engine/ce/tu102.o nvkm-y += nvkm/engine/ce/ga100.o nvkm-y += nvkm/engine/ce/ga102.o - -nvkm-y += nvkm/engine/ce/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c deleted file mode 100644 index bd0d435dbbd3..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include -#include -#include - -#include -#include -#include - -struct r535_ce_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_ce_obj_dtor(struct nvkm_object *object) -{ - struct r535_ce_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_ce_obj = { - .dtor = r535_ce_obj_dtor, -}; - -static int -r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_ce_obj *obj; - NVC0B5_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->version = 1; - args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst; - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_ce_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_ce_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_ce_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index e346e924fee8..23a10e081081 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild @@ -29,8 +29,6 @@ nvkm-y += nvkm/engine/disp/tu102.o nvkm-y += nvkm/engine/disp/ga102.o nvkm-y += nvkm/engine/disp/ad102.o -nvkm-y += nvkm/engine/disp/r535.o - nvkm-y += nvkm/engine/disp/udisp.o nvkm-y += nvkm/engine/disp/uconn.o nvkm-y += nvkm/engine/disp/uoutp.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c deleted file mode 100644 index 99110ab2f44d..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c +++ /dev/null @@ -1,1725 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" -#include "chan.h" -#include "conn.h" -#include "dp.h" -#include "head.h" -#include "ior.h" -#include "outp.h" - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static u64 -r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize) -{ - switch (chan->object.oclass & 0xff) { - case 0x7d: *psize = 0x10000; return 0x680000; - case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize); - case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize); - case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize); - default: - BUG_ON(1); - break; - } - - return 0ULL; -} - -static void -r535_chan_intr(struct nvkm_disp_chan *chan, bool en) -{ -} - -static void -r535_chan_fini(struct nvkm_disp_chan *chan) -{ - nvkm_gsp_rm_free(&chan->rm.object); -} - -static int -r535_chan_push(struct nvkm_disp_chan *chan) -{ - struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp; - NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - if (chan->memory) { - switch (nvkm_memory_target(chan->memory)) { - case NVKM_MEM_TARGET_NCOH: - ctrl->addressSpace = ADDR_SYSMEM; - ctrl->cacheSnoop = 0; - break; - case NVKM_MEM_TARGET_HOST: - ctrl->addressSpace = ADDR_SYSMEM; - ctrl->cacheSnoop = 1; - break; - case NVKM_MEM_TARGET_VRAM: - ctrl->addressSpace = ADDR_FBMEM; - break; - default: - WARN_ON(1); - return -EINVAL; - } - - ctrl->physicalAddr = nvkm_memory_addr(chan->memory); - ctrl->limit = nvkm_memory_size(chan->memory) - 1; - } - - ctrl->hclass = chan->object.oclass; - ctrl->channelInstance = chan->head; - ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 
1 : 0; - - return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); -} - -static int -r535_curs_init(struct nvkm_disp_chan *chan) -{ - NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args; - int ret; - - ret = r535_chan_push(chan); - if (ret) - return ret; - - args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, - (chan->object.oclass << 16) | chan->head, - chan->object.oclass, sizeof(*args), &chan->rm.object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->channelInstance = chan->head; - - return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); -} - -static const struct nvkm_disp_chan_func -r535_curs_func = { - .init = r535_curs_init, - .fini = r535_chan_fini, - .intr = r535_chan_intr, - .user = r535_chan_user, -}; - -static const struct nvkm_disp_chan_user -r535_curs = { - .func = &r535_curs_func, - .user = 73, -}; - -static int -r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle) -{ - return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle, - chan->chid.user << 25 | - (chan->disp->rm.client.object.handle & 0x3fff)); -} - -static void -r535_dmac_fini(struct nvkm_disp_chan *chan) -{ - struct nvkm_device *device = chan->disp->engine.subdev.device; - const u32 uoff = (chan->chid.user - 1) * 0x1000; - - chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff); - r535_chan_fini(chan); -} - -static int -r535_dmac_init(struct nvkm_disp_chan *chan) -{ - NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; - int ret; - - ret = r535_chan_push(chan); - if (ret) - return ret; - - args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, - (chan->object.oclass << 16) | chan->head, - chan->object.oclass, sizeof(*args), &chan->rm.object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->channelInstance = chan->head; - args->offset = chan->suspend_put; - - return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); -} - -static int -r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory) -{ - chan->memory = nvkm_umem_search(chan->object.client, memory); - if (IS_ERR(chan->memory)) - return PTR_ERR(chan->memory); - - return 0; -} - -static const struct nvkm_disp_chan_func -r535_dmac_func = { - .push = r535_dmac_push, - .init = r535_dmac_init, - .fini = r535_dmac_fini, - .intr = r535_chan_intr, - .user = r535_chan_user, - .bind = r535_dmac_bind, -}; - -static const struct nvkm_disp_chan_func -r535_wimm_func = { - .push = r535_dmac_push, - .init = r535_dmac_init, - .fini = r535_dmac_fini, - .intr = r535_chan_intr, - .user = r535_chan_user, -}; - -static const struct nvkm_disp_chan_user -r535_wimm = { - .func = &r535_wimm_func, - .user = 33, -}; - -static const struct nvkm_disp_chan_user -r535_wndw = { - .func = &r535_dmac_func, - .user = 1, -}; - -static void -r535_core_fini(struct nvkm_disp_chan *chan) -{ - struct nvkm_device *device = chan->disp->engine.subdev.device; - - chan->suspend_put = nvkm_rd32(device, 0x680000); - r535_chan_fini(chan); -} - -static const struct nvkm_disp_chan_func -r535_core_func = { - .push = r535_dmac_push, - .init = r535_dmac_init, - .fini = r535_core_fini, - .intr = r535_chan_intr, - .user = r535_chan_user, - .bind = r535_dmac_bind, -}; - -static const struct nvkm_disp_chan_user -r535_core = { - .func = &r535_core_func, - .user = 0, -}; - -static int -r535_sor_bl_set(struct nvkm_ior *sor, int lvl) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, - sizeof(*ctrl)); - if 
(IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->brightness = lvl; - - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); -} - -static int -r535_sor_bl_get(struct nvkm_ior *sor) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; - int ret, lvl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->displayId = BIT(sor->asy.outp->index); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - lvl = ctrl->brightness; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return lvl; -} - -static const struct nvkm_ior_func_bl -r535_sor_bl = { - .get = r535_sor_bl_get, - .set = r535_sor_bl_set, -}; - -static void -r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl; - - if (WARN_ON(size > sizeof(ctrl->bufferELD))) - return; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->numELDSize = size; - memcpy(ctrl->bufferELD, data, size); - ctrl->maxFreqSupported = 0; //XXX - ctrl->ctrl = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE); - ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE); - ctrl->deviceEntry = head; - - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static void -r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl; - - if (present) - return; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->deviceEntry = head; - - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static const struct nvkm_ior_func_hda -r535_sor_hda = { - .hpd = r535_sor_hda_hpd, - .eld = r535_sor_hda_eld, -}; - -static void -r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->mute = mute; - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static void -r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl; - - if (!enable) - r535_sor_dp_audio_mute(sor, true); - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->enable = enable; - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); - - if (enable) - r535_sor_dp_audio_mute(sor, false); -} - -static void -r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn) -{ - struct nvkm_disp *disp = sor->disp; - struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - 
NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->subDeviceInstance = 0; - ctrl->head = head; - ctrl->sorIndex = sor->id; - ctrl->dpLink = sor->asy.link == 2; - ctrl->bEnableOverride = 1; - ctrl->bMST = 1; - ctrl->hBlankSym = 0; - ctrl->vBlankSym = 0; - ctrl->colorFormat = 0; - ctrl->bEnableTwoHeadOneOr = 0; - ctrl->singleHeadMultistreamMode = 0; - ctrl->MST.slotStart = slot; - ctrl->MST.slotEnd = slot + slot_nr - 1; - ctrl->MST.PBN = pbn; - ctrl->MST.Timeslice = aligned_pbn; - ctrl->MST.sendACT = 0; - ctrl->MST.singleHeadMSTPipeline = 0; - ctrl->MST.bEnableAudioOverRightPanel = 0; - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static int -r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef, - u32 watermark, u32 hblanksym, u32 vblanksym) -{ - struct nvkm_disp *disp = sor->disp; - struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->head = head; - ctrl->sorIndex = sor->id; - ctrl->dpLink = sor->asy.link == 2; - ctrl->bEnableOverride = 1; - ctrl->bMST = 0; - ctrl->hBlankSym = hblanksym; - ctrl->vBlankSym = vblanksym; - ctrl->colorFormat = 0; - ctrl->bEnableTwoHeadOneOr = 0; - ctrl->SST.bEnhancedFraming = ef; - ctrl->SST.tuSize = 64; - ctrl->SST.waterMark = watermark; - ctrl->SST.bEnableAudioOverRightPanel = 0; - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); -} - -static const struct nvkm_ior_func_dp -r535_sor_dp = { - .sst = r535_sor_dp_sst, - .vcpi = r535_sor_dp_vcpi, - .audio = r535_sor_dp_audio, -}; - -static void -r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling, - bool scrambling_low_rates) -{ - struct nvkm_outp *outp = sor->asy.outp; - struct nvkm_disp *disp = outp->disp; - NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(outp->index); - ctrl->caps = 0; - if (support) - ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE); - if (scrambling) - ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE); - if (scrambling_low_rates) - ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE); - - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static void -r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute) -{ - struct nvkm_disp *disp = outp->disp; - NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(outp->index); - ctrl->mute = mute; - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static void -r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable) -{ - struct nvkm_disp *disp = outp->disp; - NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(outp->index); - ctrl->transmitControl = - NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) | - NVDEF(NV0073_CTRL_SPECIFIC, 
SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) | - NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) | - NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) | - NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) | - NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO); - ctrl->packetSize = 10; - ctrl->aPacket[0] = 0x03; - ctrl->aPacket[1] = 0x00; - ctrl->aPacket[2] = 0x00; - ctrl->aPacket[3] = enable ? 0x10 : 0x01; - ctrl->aPacket[4] = 0x00; - ctrl->aPacket[5] = 0x00; - ctrl->aPacket[6] = 0x00; - ctrl->aPacket[7] = 0x00; - ctrl->aPacket[8] = 0x00; - ctrl->aPacket[9] = 0x00; - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static void -r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable) -{ - struct nvkm_device *device = sor->disp->engine.subdev.device; - const u32 hdmi = head * 0x400; - - r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable); - r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable); - - /* General Control (GCP). */ - nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000); - nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010); - nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001); -} - -static void -r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey) -{ - struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl; - - if (!enable) - return; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return; - - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->enable = enable; - - WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); -} - -static const struct nvkm_ior_func_hdmi -r535_sor_hdmi = { - .ctrl = r535_sor_hdmi_ctrl, - .scdc = r535_sor_hdmi_scdc, - /*TODO: SF_USER -> KMS. 
*/ - .infoframe_avi = gv100_sor_hdmi_infoframe_avi, - .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi, - .audio = r535_sor_hdmi_audio, -}; - -static const struct nvkm_ior_func -r535_sor = { - .hdmi = &r535_sor_hdmi, - .dp = &r535_sor_dp, - .hda = &r535_sor_hda, - .bl = &r535_sor_bl, -}; - -static int -r535_sor_new(struct nvkm_disp *disp, int id) -{ - return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/); -} - -static int -r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask) -{ - *pmask = 0xf; - return 4; -} - -static void -r535_head_vblank_put(struct nvkm_head *head) -{ - struct nvkm_device *device = head->disp->engine.subdev.device; - - nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000); -} - -static void -r535_head_vblank_get(struct nvkm_head *head) -{ - struct nvkm_device *device = head->disp->engine.subdev.device; - - nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002); - nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002); -} - -static void -r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state) -{ -} - -static const struct nvkm_head_func -r535_head = { - .state = r535_head_state, - .vblank_get = r535_head_vblank_get, - .vblank_put = r535_head_vblank_put, -}; - -static struct nvkm_conn * -r535_conn_new(struct nvkm_disp *disp, u32 id) -{ - NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl; - struct nvbios_connE dcbE = {}; - struct nvkm_conn *conn; - int ret, index; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return (void *)ctrl; - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(id); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ERR_PTR(ret); - } - - list_for_each_entry(conn, &disp->conns, head) { - if (conn->index == ctrl->data[0].index) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return conn; - } - } - - dcbE.type = ctrl->data[0].type; - index = ctrl->data[0].index; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - ret = nvkm_conn_new(disp, index, &dcbE, &conn); - if (ret) - return ERR_PTR(ret); - - list_add_tail(&conn->head, &disp->conns); - return conn; -} - -static void -r535_outp_release(struct nvkm_outp *outp) -{ - outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id); - outp->ior->asy.outp = NULL; - outp->ior = NULL; -} - -static int -r535_outp_acquire(struct nvkm_outp *outp, bool hda) -{ - struct nvkm_disp *disp = outp->disp; - struct nvkm_ior *ior; - NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl; - int ret, or; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - ctrl->sorExcludeMask = disp->rm.assigned_sors; - if (hda) - ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) { - if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) { - disp->rm.assigned_sors |= BIT(or); - break; - } - } - - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag))) - return -EINVAL; - - ior = nvkm_ior_find(disp, SOR, or); - if (WARN_ON(!ior)) - return -EINVAL; - 
- nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior); - return 0; -} - -static int -r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) -{ - NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; - int ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->head = head; - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - *displayid = ctrl->displayId; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return 0; -} - -static struct nvkm_ior * -r535_outp_inherit(struct nvkm_outp *outp) -{ - struct nvkm_disp *disp = outp->disp; - struct nvkm_head *head; - u32 displayid; - int ret; - - list_for_each_entry(head, &disp->heads, head) { - ret = r535_disp_head_displayid(disp, head->id, &displayid); - if (WARN_ON(ret)) - return NULL; - - if (displayid == BIT(outp->index)) { - NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; - u32 id, proto; - struct nvkm_ior *ior; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return NULL; - - ctrl->subDeviceInstance = 0; - ctrl->displayId = displayid; - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return NULL; - } - - id = ctrl->index; - proto = ctrl->protocol; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - ior = nvkm_ior_find(disp, SOR, id); - if (WARN_ON(!ior)) - return NULL; - - switch (proto) { - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: - ior->arm.proto = TMDS; - ior->arm.link = 1; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: - ior->arm.proto = TMDS; - ior->arm.link = 2; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: - ior->arm.proto = TMDS; - ior->arm.link = 3; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: - ior->arm.proto = DP; - ior->arm.link = 1; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: - ior->arm.proto = DP; - ior->arm.link = 2; - break; - default: - WARN_ON(1); - return NULL; - } - - ior->arm.proto_evo = proto; - ior->arm.head = BIT(head->id); - disp->rm.assigned_sors |= BIT(ior->id); - return ior; - } - } - - return NULL; -} - -static int -r535_outp_dfp_get_info(struct nvkm_outp *outp) -{ - NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - int ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->displayId = BIT(outp->index); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n", - ctrl->displayId, ctrl->flags, ctrl->flags2); - - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return 0; -} - -static int -r535_outp_detect(struct nvkm_outp *outp) -{ - NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - int ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayMask = BIT(outp->index); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if 
(ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - if (ctrl->displayMask & BIT(outp->index)) { - ret = r535_outp_dfp_get_info(outp); - if (ret == 0) - ret = 1; - } else { - ret = 0; - } - - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; -} - -static int -r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id) -{ - NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = id; - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); -} - -static int -r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid) -{ - NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - int ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - *pid = ctrl->displayIdAssigned; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return 0; -} - -static int -r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4]) -{ - NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->displayId = BIT(outp->index); - ctrl->numLanes = lanes; - for (int i = 0; i < lanes; i++) - ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA, PREEMPHASIS, pe[i]) | - NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]); - - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); -} - -static int -r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw) -{ - struct nvkm_disp *disp = outp->disp; - NV0073_CTRL_DP_CTRL_PARAMS *ctrl; - int ret, retries; - u32 cmd, data; - - cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) | - NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) | - NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES); - data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) | - NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) | - NVVAL(NV0073_CTRL, DP_DATA, TARGET, target); - - if (mst) - cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM); - - if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) - cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE); - - if (target == 0 && - (outp->dp.dpcd[DPCD_RC02] & 0x20) && - !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)) - cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES); - - /* We should retry up to 3 times, but only if GSP asks politely */ - for (retries = 0; retries < 3; ++retries) { - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - ctrl->retryTimeMs = 0; - ctrl->cmd = cmd; - ctrl->data = data; - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) { - /* - * Device (likely an eDP panel) isn't ready yet, wait for the time specified - * by 
GSP before retrying again - */ - nvkm_debug(&disp->engine.subdev, - "Waiting %dms for GSP LT panel delay before retrying\n", - ctrl->retryTimeMs); - msleep(ctrl->retryTimeMs); - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - } else { - /* GSP didn't say to retry, or we were successful */ - if (ctrl->err) - ret = -EIO; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - break; - } - } - - return ret; -} - -static int -r535_dp_train(struct nvkm_outp *outp, bool retrain) -{ - for (int target = outp->dp.lttprs; target >= 0; target--) { - int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst, - outp->dp.lt.nr, - outp->dp.lt.bw); - if (ret) - return ret; - } - - return 0; -} - -static int -r535_dp_rates(struct nvkm_outp *outp) -{ - NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - - if (outp->conn->info.type != DCB_CONNECTOR_eDP || - !outp->dp.rates || outp->dp.rate[0].dpcd < 0) - return 0; - - if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) - return -EINVAL; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->displayId = BIT(outp->index); - for (int i = 0; i < outp->dp.rates; i++) - ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200; - - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); -} - -static int -r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) -{ - struct nvkm_disp *disp = outp->disp; - NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl; - u8 size = *psize; - int ret; - int retries; - - for (retries = 0; retries < 3; ++retries) { - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - ctrl->bAddrOnly = !size; - ctrl->cmd = type; - if (ctrl->bAddrOnly) { - ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE); - ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE); - } - ctrl->addr = addr; - ctrl->size = !ctrl->bAddrOnly ? 
(size - 1) : 0; - memcpy(ctrl->data, data, size); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) { - /* - * Device (likely an eDP panel) isn't ready yet, wait for the time specified - * by GSP before retrying again - */ - nvkm_debug(&disp->engine.subdev, - "Waiting %dms for GSP LT panel delay before retrying in AUX\n", - ctrl->retryTimeMs); - msleep(ctrl->retryTimeMs); - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - } else { - memcpy(data, ctrl->data, size); - *psize = ctrl->size; - ret = ctrl->replyType; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - break; - } - } - return ret; -} - -static int -r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu) -{ - return 0; -} - -static void -r535_dp_release(struct nvkm_outp *outp) -{ - if (!outp->dp.lt.bw) { - if (!WARN_ON(!outp->dp.rates)) - outp->dp.lt.bw = outp->dp.rate[0].rate / 27000; - else - outp->dp.lt.bw = 0x06; - } - - outp->dp.lt.nr = 0; - - r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw); - r535_outp_release(outp); -} - -static int -r535_dp_acquire(struct nvkm_outp *outp, bool hda) -{ - int ret; - - ret = r535_outp_acquire(outp, hda); - if (ret) - return ret; - - return 0; -} - -static const struct nvkm_outp_func -r535_dp = { - .detect = r535_outp_detect, - .inherit = r535_outp_inherit, - .acquire = r535_dp_acquire, - .release = r535_dp_release, - .dp.aux_pwr = r535_dp_aux_pwr, - .dp.aux_xfer = r535_dp_aux_xfer, - .dp.mst_id_get = r535_dp_mst_id_get, - .dp.mst_id_put = r535_dp_mst_id_put, - .dp.rates = r535_dp_rates, - .dp.train = r535_dp_train, - .dp.drive = r535_dp_drive, -}; - -static int -r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize) -{ - NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; - int ret = -E2BIG; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(outp->index); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - ret = -E2BIG; - if (ctrl->bufferSize <= *psize) { - memcpy(data, ctrl->edidBuffer, ctrl->bufferSize); - *psize = ctrl->bufferSize; - ret = 0; - } - - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; -} - -static const struct nvkm_outp_func -r535_tmds = { - .detect = r535_outp_detect, - .inherit = r535_outp_inherit, - .acquire = r535_outp_acquire, - .release = r535_outp_release, - .edid_get = r535_tmds_edid_get, -}; - -static int -r535_outp_new(struct nvkm_disp *disp, u32 id) -{ - NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; - enum nvkm_ior_proto proto; - struct dcb_output dcbE = {}; - struct nvkm_conn *conn; - struct nvkm_outp *outp; - u8 locn, link = 0; - int ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->subDeviceInstance = 0; - ctrl->displayId = BIT(id); - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - switch (ctrl->type) { - case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE: - return 0; - case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: - switch (ctrl->protocol) { - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: - proto = TMDS; - link = 1; - break; - case 
NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: - proto = TMDS; - link = 2; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: - proto = TMDS; - link = 3; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: - proto = DP; - link = 1; - break; - case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: - proto = DP; - link = 2; - break; - default: - WARN_ON(1); - return -EINVAL; - } - - break; - default: - WARN_ON(1); - return -EINVAL; - } - - locn = ctrl->location; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - conn = r535_conn_new(disp, id); - if (IS_ERR(conn)) - return PTR_ERR(conn); - - switch (proto) { - case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break; - case DP: dcbE.type = DCB_OUTPUT_DP; break; - default: - WARN_ON(1); - return -EINVAL; - } - - dcbE.location = locn; - dcbE.connector = conn->index; - dcbE.heads = disp->head.mask; - dcbE.i2c_index = 0xff; - dcbE.link = dcbE.sorconf.link = link; - - if (proto == TMDS) { - ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp); - if (ret) - return ret; - } else { - NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; - bool mst, wm; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->sorIndex = ~0; - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } - - switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: - dcbE.dpconf.link_bw = 0x06; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: - dcbE.dpconf.link_bw = 0x0a; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: - dcbE.dpconf.link_bw = 0x14; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: - dcbE.dpconf.link_bw = 0x1e; - break; - default: - dcbE.dpconf.link_bw = 0x00; - break; - } - - mst = ctrl->bIsMultistreamSupported; - wm = ctrl->bHasIncreasedWatermarkLimits; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - if (WARN_ON(!dcbE.dpconf.link_bw)) - return -EINVAL; - - dcbE.dpconf.link_nr = 4; - - ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp); - if (ret) - return ret; - - outp->dp.mst = mst; - outp->dp.increased_wm = wm; - } - - - outp->conn = conn; - list_add_tail(&outp->head, &disp->outps); - return 0; -} - -static void -r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc) -{ - struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq); - Nv2080DpIrqNotification *irq = repv; - - if (WARN_ON(repc < sizeof(*irq))) - return; - - nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId); - - if (irq->displayId) - nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ); -} - -static void -r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc) -{ - struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd); - Nv2080HotplugNotification *hpd = repv; - - if (WARN_ON(repc < sizeof(*hpd))) - return; - - nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n", - hpd->plugDisplayMask, hpd->unplugDisplayMask); - - for (int i = 0; i < 31; i++) { - u32 mask = 0; - - if (hpd->plugDisplayMask & BIT(i)) - mask |= NVKM_DPYID_PLUG; - if (hpd->unplugDisplayMask & BIT(i)) - mask |= NVKM_DPYID_UNPLUG; - - if (mask) - nvkm_event_ntfy(&disp->rm.event, i, mask); - } -} - -static const struct nvkm_event_func -r535_disp_event = { -}; - -static void 
-r535_disp_intr_head_timing(struct nvkm_disp *disp, int head) -{ - struct nvkm_subdev *subdev = &disp->engine.subdev; - struct nvkm_device *device = subdev->device; - u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04)); - - if (stat & 0x00000002) { - nvkm_disp_vblank(disp, head); - - nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002); - } -} - -static irqreturn_t -r535_disp_intr(struct nvkm_inth *inth) -{ - struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth); - struct nvkm_subdev *subdev = &disp->engine.subdev; - struct nvkm_device *device = subdev->device; - unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff; - int head; - - for_each_set_bit(head, &mask, 8) - r535_disp_intr_head_timing(disp, head); - - return IRQ_HANDLED; -} - -static void -r535_disp_fini(struct nvkm_disp *disp, bool suspend) -{ - if (!disp->engine.subdev.use.enabled) - return; - - nvkm_gsp_rm_free(&disp->rm.object); - - if (!suspend) { - nvkm_gsp_event_dtor(&disp->rm.irq); - nvkm_gsp_event_dtor(&disp->rm.hpd); - nvkm_event_fini(&disp->rm.event); - - nvkm_gsp_rm_free(&disp->rm.objcom); - nvkm_gsp_device_dtor(&disp->rm.device); - nvkm_gsp_client_dtor(&disp->rm.client); - } -} - -static int -r535_disp_init(struct nvkm_disp *disp) -{ - int ret; - - ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16, - disp->func->root.oclass, 0, &disp->rm.object); - if (ret) - return ret; - - return 0; -} - -static int -r535_disp_oneinit(struct nvkm_disp *disp) -{ - struct nvkm_device *device = disp->engine.subdev.device; - struct nvkm_gsp *gsp = device->gsp; - NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl; - int ret, i; - - /* RAMIN. */ - ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst); - if (ret) - return ret; - - if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM)) - return -EINVAL; - - ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory); - ctrl->instMemSize = nvkm_memory_size(disp->inst->memory); - ctrl->instMemAddrSpace = ADDR_FBMEM; - ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED; - - ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); - if (ret) - return ret; - - /* OBJs. 
*/ - ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device); - if (ret) - return ret; - - ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0, - &disp->rm.objcom); - if (ret) - return ret; - - { - NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - disp->wndw.mask = ctrl->windowPresentMask; - disp->wndw.nr = fls(disp->wndw.mask); - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - } - - /* */ - { -#if defined(CONFIG_ACPI) && defined(CONFIG_X86) - NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl; - struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice; - - ctrl = nvkm_gsp_rm_ctrl_get(subdevice, - NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */ - - { - const guid_t NBCI_DSM_GUID = - GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7, - 0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44); - u64 NBCI_DSM_REV = 0x00000102; - const guid_t NVHG_DSM_GUID = - GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48, - 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4); - u64 NVHG_DSM_REV = 0x00000102; - acpi_handle handle = ACPI_HANDLE(device->dev); - - if (handle && acpi_has_method(handle, "_DSM")) { - bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV, - 1ULL << 0x00000014); - bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV, - 1ULL << 0x00000014); - - if (nbci || nvhg) { - union acpi_object argv4 = { - .buffer.type = ACPI_TYPE_BUFFER, - .buffer.length = sizeof(ctrl->backLightData), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), - }, *obj; - - obj = acpi_evaluate_dsm(handle, nbci ? 
&NBCI_DSM_GUID : &NVHG_DSM_GUID, - 0x00000102, 0x14, &argv4); - if (!obj) { - acpi_handle_info(handle, "failed to evaluate _DSM\n"); - } else { - for (int i = 0; i < obj->package.count; i++) { - union acpi_object *elt = &obj->package.elements[i]; - u32 size; - - if (elt->integer.value & ~0xffffffffULL) - size = 8; - else - size = 4; - - memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size); - ctrl->backLightDataSize += size; - } - - ctrl->status = 0; - ACPI_FREE(obj); - } - - kfree(argv4.buffer.pointer); - } - } - } - - ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl); - if (ret) - return ret; -#endif - } - - /* */ - { - NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); - if (ret) - return ret; - } - - /* */ - { - NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, - NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - disp->head.nr = ctrl->numHeads; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - } - - /* */ - { - NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - disp->head.mask = ctrl->headMask; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - for_each_set_bit(i, &disp->head.mask, disp->head.nr) { - ret = nvkm_head_new_(&r535_head, disp, i); - if (ret) - return ret; - } - } - - disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask); - nvkm_debug(&disp->engine.subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask); - for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) { - ret = disp->func->sor.new(disp, i); - if (ret) - return ret; - } - - /* */ - { - NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; - unsigned long mask; - int i; - - ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, - NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - mask = ctrl->displayMask; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - - for_each_set_bit(i, &mask, 32) { - ret = r535_outp_new(disp, i); - if (ret) - return ret; - } - } - - ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event); - if (WARN_ON(ret)) - return ret; - - ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG, - r535_disp_hpd, &disp->rm.hpd); - if (ret) - return ret; - - ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ, - r535_disp_irq, &disp->rm.irq); - if (ret) - return ret; - - /* RAMHT. */ - ret = nvkm_ramht_new(device, disp->func->ramht_size ? 
disp->func->ramht_size : - 0x1000, 0, disp->inst, &disp->ramht); - if (ret) - return ret; - - ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst); - if (ret < 0) - return ret; - - ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev, - r535_disp_intr, &disp->engine.subdev.inth); - if (ret) - return ret; - - nvkm_inth_allow(&disp->engine.subdev.inth); - return 0; -} - -static void -r535_disp_dtor(struct nvkm_disp *disp) -{ - kfree(disp->func); -} - -int -r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) -{ - struct nvkm_disp_func *rm; - int ret; - - if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_disp_dtor; - rm->oneinit = r535_disp_oneinit; - rm->init = r535_disp_init; - rm->fini = r535_disp_fini; - rm->uevent = hw->uevent; - rm->sor.cnt = r535_sor_cnt; - rm->sor.new = r535_sor_new; - rm->ramht_size = hw->ramht_size; - - rm->root = hw->root; - - for (int i = 0; hw->user[i].ctor; i++) { - switch (hw->user[i].base.oclass & 0xff) { - case 0x73: rm->user[i] = hw->user[i]; break; - case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break; - case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break; - case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break; - case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break; - default: - WARN_ON(1); - continue; - } - } - - ret = nvkm_disp_new_(rm, device, type, inst, pdisp); - if (ret) - kfree(rm); - - mutex_init(&(*pdisp)->super.mutex); //XXX - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index aff92848abfe..5a074b9970ab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -26,7 +26,5 @@ nvkm-y += nvkm/engine/fifo/tu102.o nvkm-y += nvkm/engine/fifo/ga100.o nvkm-y += nvkm/engine/fifo/ga102.o -nvkm-y += nvkm/engine/fifo/r535.o - nvkm-y += nvkm/engine/fifo/ucgrp.o nvkm-y += nvkm/engine/fifo/uchan.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c deleted file mode 100644 index 129f274c9bfd..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c +++ /dev/null @@ -1,550 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" -#include "cgrp.h" -#include "chan.h" -#include "chid.h" -#include "runl.h" - -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static u32 -r535_chan_doorbell_handle(struct nvkm_chan *chan) -{ - return (chan->cgrp->runl->id << 16) | chan->id; -} - -static void -r535_chan_stop(struct nvkm_chan *chan) -{ -} - -static void -r535_chan_start(struct nvkm_chan *chan) -{ -} - -static void -r535_chan_ramfc_clear(struct nvkm_chan *chan) -{ - struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; - - nvkm_gsp_rm_free(&chan->rm.object); - - dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size, - chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr); - - nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx); -} - -#define CHID_PER_USERD 8 - -static int -r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) -{ - struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; - struct nvkm_engn *engn; - struct nvkm_device *device = fifo->engine.subdev.device; - NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; - const int userd_p = chan->id / CHID_PER_USERD; - const int userd_i = chan->id % CHID_PER_USERD; - u32 eT = ~0; - int ret; - - if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { - ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); - if (ret) - return ret; - } - - nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { - eT = engn->id; - break; - } - - if (WARN_ON(eT == ~0)) - return -EINVAL; - - chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, - fifo->rm.mthdbuf_size, - &chan->rm.mthdbuf.addr, GFP_KERNEL); - if (!chan->rm.mthdbuf.ptr) - return -ENOMEM; - - args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id, - fifo->func->chan.user.oclass, sizeof(*args), - &chan->rm.object); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->gpFifoOffset = offset; - args->gpFifoEntries = length / 8; - - args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); - args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE); - args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq); - if (!priv) - args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); - else - args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE); - args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE); - - args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE); - args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE); - - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, 
CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT); - args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); - args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); - - args->hVASpace = chan->vmm->rm.object.handle; - args->engineType = eT; - - args->instanceMem.base = chan->inst->addr; - args->instanceMem.size = chan->inst->size; - args->instanceMem.addressSpace = 2; - args->instanceMem.cacheAttrib = 1; - - args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base; - args->userdMem.size = fifo->func->chan.func->userd->size; - args->userdMem.addressSpace = 2; - args->userdMem.cacheAttrib = 1; - - args->ramfcMem.base = chan->inst->addr + 0; - args->ramfcMem.size = 0x200; - args->ramfcMem.addressSpace = 2; - args->ramfcMem.cacheAttrib = 1; - - args->mthdbufMem.base = chan->rm.mthdbuf.addr; - args->mthdbufMem.size = fifo->rm.mthdbuf_size; - args->mthdbufMem.addressSpace = 1; - args->mthdbufMem.cacheAttrib = 0; - - if (!priv) - args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER); - else - args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN); - args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); - args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); - - ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); - if (ret) - return ret; - - if (1) { - NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl; - - if (1) { - NVA06F_CTRL_BIND_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object, - NVA06F_CTRL_CMD_BIND, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return PTR_ERR(ctrl); - - ctrl->engineType = eT; - - ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl); - if (ret) - return ret; - } - - ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object, - NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return PTR_ERR(ctrl); - - ctrl->bEnable = 1; - ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl); - } - - return ret; -} - -static const struct nvkm_chan_func_ramfc -r535_chan_ramfc = { - .write = r535_chan_ramfc_write, - .clear = r535_chan_ramfc_clear, - .devm = 0xfff, - .priv = true, -}; - -static const struct nvkm_chan_func -r535_chan = { - .inst = &gf100_chan_inst, - .userd = &gv100_chan_userd, - .ramfc = &r535_chan_ramfc, - .start = r535_chan_start, - .stop = r535_chan_stop, - .doorbell_handle = r535_chan_doorbell_handle, -}; - -static const struct nvkm_cgrp_func -r535_cgrp = { -}; - -static int -r535_engn_nonstall(struct nvkm_engn *engn) -{ - struct nvkm_subdev *subdev = &engn->engine->subdev; - int ret; - - ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst); - WARN_ON(ret == -ENOENT); - return ret; -} - -static const struct nvkm_engn_func -r535_ce = { - .nonstall = r535_engn_nonstall, -}; - -static int -r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) -{ - /* RM requires GR context buffers to remain mapped until after the - * channel has been destroyed (as opposed to after the last gr obj - * has been deleted). - * - * Take an extra ref here, which will be released once the channel - * object has been deleted. 
- */ - refcount_inc(&vctx->refs); - chan->rm.grctx = vctx; - return 0; -} - -static const struct nvkm_engn_func -r535_gr = { - .nonstall = r535_engn_nonstall, - .ctor2 = r535_gr_ctor, -}; - -static int -r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) -{ - struct nvkm_gsp_client *client = &chan->vmm->rm.client; - NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice, - NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->hClient = client->object.handle; - ctrl->hObject = chan->rm.object.handle; - ctrl->hChanClient = client->object.handle; - ctrl->virtAddress = vctx->vma->addr; - ctrl->size = vctx->inst->size; - ctrl->engineType = engn->id; - ctrl->ChID = chan->id; - - return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl); -} - -static int -r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) -{ - int ret; - - if (WARN_ON(!engn->rm.size)) - return -EINVAL; - - ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL, - &vctx->inst); - if (ret) - return ret; - - ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma); - if (ret) - return ret; - - ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0); - if (ret) - return ret; - - return r535_flcn_bind(engn, vctx, chan); -} - -static const struct nvkm_engn_func -r535_flcn = { - .nonstall = r535_engn_nonstall, - .ctor2 = r535_flcn_ctor, -}; - -static void -r535_runl_allow(struct nvkm_runl *runl, u32 engm) -{ -} - -static void -r535_runl_block(struct nvkm_runl *runl, u32 engm) -{ -} - -static const struct nvkm_runl_func -r535_runl = { - .block = r535_runl_block, - .allow = r535_runl_allow, -}; - -static int -r535_fifo_2080_type(enum nvkm_subdev_type type, int inst) -{ - switch (type) { - case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0; - case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst; - case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2; - case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst; - case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst; - case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst; - case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA; - case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW; - default: - break; - } - - WARN_ON(1); - return -EINVAL; -} - -static int -r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype) -{ - switch (rm) { - case RM_ENGINE_TYPE_GR0: - *ptype = NVKM_ENGINE_GR; - return 0; - case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9: - *ptype = NVKM_ENGINE_CE; - return rm - RM_ENGINE_TYPE_COPY0; - case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7: - *ptype = NVKM_ENGINE_NVDEC; - return rm - RM_ENGINE_TYPE_NVDEC0; - case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2: - *ptype = NVKM_ENGINE_NVENC; - return rm - RM_ENGINE_TYPE_NVENC0; - case RM_ENGINE_TYPE_SW: - *ptype = NVKM_ENGINE_SW; - return 0; - case RM_ENGINE_TYPE_SEC2: - *ptype = NVKM_ENGINE_SEC2; - return 0; - case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7: - *ptype = NVKM_ENGINE_NVJPG; - return rm - RM_ENGINE_TYPE_NVJPEG0; - case RM_ENGINE_TYPE_OFA: - *ptype = NVKM_ENGINE_OFA; - return 0; - default: - return -EINVAL; - } -} - -static int -r535_fifo_ectx_size(struct nvkm_fifo *fifo) -{ - NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl; - struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp; - struct nvkm_runl *runl; - struct nvkm_engn 
*engn; - - ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO, - sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return PTR_ERR(ctrl); - - for (int i = 0; i < ctrl->numConstructedFalcons; i++) { - nvkm_runl_foreach(runl, fifo) { - nvkm_runl_foreach_engn(engn, runl) { - if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) { - engn->rm.size = - ctrl->constructedFalconsTable[i].ctxBufferSize; - break; - } - } - } - } - - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - return 0; -} - -static int -r535_fifo_runl_ctor(struct nvkm_fifo *fifo) -{ - struct nvkm_subdev *subdev = &fifo->engine.subdev; - struct nvkm_gsp *gsp = subdev->device->gsp; - struct nvkm_runl *runl; - struct nvkm_engn *engn; - u32 cgids = 2048; - u32 chids = 2048; - int ret; - NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; - - if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) || - (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid))) - return ret; - - ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return PTR_ERR(ctrl); - - for (int i = 0; i < ctrl->numEntries; i++) { - const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE]; - const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST]; - - runl = nvkm_runl_get(fifo, id, addr); - if (!runl) { - runl = nvkm_runl_new(fifo, id, addr, 0); - if (WARN_ON(IS_ERR(runl))) - continue; - } - } - - for (int i = 0; i < ctrl->numEntries; i++) { - const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE]; - const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE]; - const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST]; - enum nvkm_subdev_type type; - int inst, nv2080; - - runl = nvkm_runl_get(fifo, id, addr); - if (!runl) - continue; - - inst = r535_fifo_engn_type(rmid, &type); - if (inst < 0) { - nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid); - nvkm_runl_del(runl); - continue; - } - - nv2080 = r535_fifo_2080_type(type, inst); - if (nv2080 < 0) { - nvkm_runl_del(runl); - continue; - } - - switch (type) { - case NVKM_ENGINE_CE: - engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst); - break; - case NVKM_ENGINE_GR: - engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst); - break; - case NVKM_ENGINE_NVDEC: - case NVKM_ENGINE_NVENC: - case NVKM_ENGINE_NVJPG: - case NVKM_ENGINE_OFA: - engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst); - break; - case NVKM_ENGINE_SW: - continue; - default: - engn = NULL; - break; - } - - if (!engn) { - nvkm_runl_del(runl); - continue; - } - - engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC]; - } - - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - - { - NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - fifo->rm.mthdbuf_size = ctrl->size; - - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - } - - return r535_fifo_ectx_size(fifo); -} - -static void -r535_fifo_dtor(struct nvkm_fifo *fifo) -{ - kfree(fifo->func); -} - -int -r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) -{ - 
struct nvkm_fifo_func *rm; - - if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_fifo_dtor; - rm->runl_ctor = r535_fifo_runl_ctor; - rm->runl = &r535_runl; - rm->cgrp = hw->cgrp; - rm->cgrp.func = &r535_cgrp; - rm->chan = hw->chan; - rm->chan.func = &r535_chan; - rm->nonstall = &ga100_fifo_nonstall; - rm->nonstall_ctor = ga100_fifo_nonstall_ctor; - - return nvkm_fifo_new_(rm, device, type, inst, pfifo); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 1555f8c40b4f..487fcc14b9a9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -43,8 +43,6 @@ nvkm-y += nvkm/engine/gr/tu102.o nvkm-y += nvkm/engine/gr/ga102.o nvkm-y += nvkm/engine/gr/ad102.o -nvkm-y += nvkm/engine/gr/r535.o - nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o nvkm-y += nvkm/engine/gr/ctxgf100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c deleted file mode 100644 index f4bed3eb1ec2..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c +++ /dev/null @@ -1,508 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "gf100.h" - -#include -#include -#include -#include - -#include - -#include - -#include -#include -#include -#include -#include -#include - -#define r535_gr(p) container_of((p), struct r535_gr, base) - -#define R515_GR_MAX_CTXBUFS 9 - -struct r535_gr { - struct nvkm_gr base; - - struct { - u16 bufferId; - u32 size; - u8 page; - u8 align; - bool global; - bool init; - bool ro; - } ctxbuf[R515_GR_MAX_CTXBUFS]; - int ctxbuf_nr; - - struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; -}; - -struct r535_gr_chan { - struct nvkm_object object; - struct r535_gr *gr; - - struct nvkm_vmm *vmm; - struct nvkm_chan *chan; - - struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; - struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; -}; - -struct r535_gr_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_gr_obj_dtor(struct nvkm_object *object) -{ - struct r535_gr_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_gr_obj = { - .dtor = r535_gr_obj_dtor, -}; - -static int -r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object); - struct r535_gr_obj *obj; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object); - *pobject = &obj->object; - - return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0, - &obj->rm); -} - -static void * -r535_gr_chan_dtor(struct nvkm_object *object) -{ - struct r535_gr_chan *grc = container_of(object, typeof(*grc), object); - struct r535_gr *gr = grc->gr; - - for (int i = 0; i < gr->ctxbuf_nr; i++) { - nvkm_vmm_put(grc->vmm, &grc->vma[i]); - nvkm_memory_unref(&grc->mem[i]); - } - - nvkm_vmm_unref(&grc->vmm); - return grc; -} - -static const struct nvkm_object_func -r535_gr_chan = { - .dtor = r535_gr_chan_dtor, -}; - -static int -r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm, - struct nvkm_memory **pmem, struct nvkm_vma **pvma, - struct nvkm_gsp_object *chan) -{ - struct nvkm_subdev *subdev = &gr->base.engine.subdev; - struct nvkm_device *device = subdev->device; - NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice, - NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); - if (WARN_ON(IS_ERR(ctrl))) - return PTR_ERR(ctrl); - - ctrl->engineType = 1; - ctrl->hChanClient = vmm->rm.client.object.handle; - ctrl->hObject = chan->handle; - - for (int i = 0; i < gr->ctxbuf_nr; i++) { - NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry = - &ctrl->promoteEntry[ctrl->entryCount]; - const bool alloc = golden || !gr->ctxbuf[i].global; - int ret; - - entry->bufferId = gr->ctxbuf[i].bufferId; - entry->bInitialize = gr->ctxbuf[i].init && alloc; - - if (alloc) { - ret = nvkm_memory_new(device, gr->ctxbuf[i].init ? 
- NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST, - gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page, - gr->ctxbuf[i].init, &pmem[i]); - if (WARN_ON(ret)) - return ret; - - if (gr->ctxbuf[i].bufferId == - NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) - entry->bNonmapped = 1; - } else { - if (gr->ctxbuf[i].bufferId == - NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP) - continue; - - pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]); - } - - if (!entry->bNonmapped) { - struct gf100_vmm_map_v0 args = { - .priv = 1, - .ro = gr->ctxbuf[i].ro, - }; - - mutex_lock(&vmm->mutex.vmm); - ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align, - nvkm_memory_size(pmem[i]), &pvma[i]); - mutex_unlock(&vmm->mutex.vmm); - if (ret) - return ret; - - ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args)); - if (ret) - return ret; - - entry->gpuVirtAddr = pvma[i]->addr; - } - - if (entry->bInitialize) { - entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]); - entry->size = gr->ctxbuf[i].size; - entry->physAttr = 4; - } - - nvkm_debug(subdev, - "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n", - entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size, - entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped); - - ctrl->entryCount++; - } - - return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl); -} - -static int -r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass, - struct nvkm_object **pobject) -{ - struct r535_gr *gr = r535_gr(base); - struct r535_gr_chan *grc; - int ret; - - if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object); - grc->gr = gr; - grc->vmm = nvkm_vmm_ref(chan->vmm); - grc->chan = chan; - *pobject = &grc->object; - - ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object); - if (ret) - return ret; - - return 0; -} - -static u64 -r535_gr_units(struct nvkm_gr *gr) -{ - struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp; - - return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; -} - -static int -r535_gr_oneinit(struct nvkm_gr *base) -{ - NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; - struct r535_gr *gr = container_of(base, typeof(*gr), base); - struct nvkm_subdev *subdev = &gr->base.engine.subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_gsp *gsp = device->gsp; - struct nvkm_mmu *mmu = device->mmu; - struct { - struct nvkm_memory *inst; - struct nvkm_vmm *vmm; - struct nvkm_gsp_object chan; - struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; - } golden = {}; - int ret; - - /* Allocate a channel to use for golden context init. 
*/ - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst); - if (ret) - goto done; - - ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm); - if (ret) - goto done; - - ret = mmu->func->promote_vmm(golden.vmm); - if (ret) - goto done; - - { - NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; - - args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000, - device->fifo->func->chan.user.oclass, - sizeof(*args), &golden.chan); - if (IS_ERR(args)) { - ret = PTR_ERR(args); - goto done; - } - - args->gpFifoOffset = 0; - args->gpFifoEntries = 0x1000 / 8; - args->flags = - NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) | - NVDEF(NVOS04, FLAGS, VPR, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) | - NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) | - NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) | - NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) | - NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) | - NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) | - NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) | - NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) | - NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) | - NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) | - NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) | - NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) | - NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); - args->hVASpace = golden.vmm->rm.object.handle; - args->engineType = 1; - args->instanceMem.base = nvkm_memory_addr(golden.inst); - args->instanceMem.size = 0x1000; - args->instanceMem.addressSpace = 2; - args->instanceMem.cacheAttrib = 1; - args->ramfcMem.base = nvkm_memory_addr(golden.inst); - args->ramfcMem.size = 0x200; - args->ramfcMem.addressSpace = 2; - args->ramfcMem.cacheAttrib = 1; - args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000; - args->userdMem.size = 0x200; - args->userdMem.addressSpace = 2; - args->userdMem.cacheAttrib = 1; - args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000; - args->mthdbufMem.size = 0x5000; - args->mthdbufMem.addressSpace = 2; - args->mthdbufMem.cacheAttrib = 1; - args->internalFlags = - NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) | - NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) | - NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); - - ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args); - if (ret) - goto done; - } - - /* Fetch context buffer info from RM and allocate each of them here to use - * during golden context init (or later as a global context buffer). - * - * Also build the information that'll be used to create channel contexts. 
- */ - info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, - sizeof(*info)); - if (WARN_ON(IS_ERR(info))) { - ret = PTR_ERR(info); - goto done; - } - - for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) { - static const struct { - u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ - u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */ - bool global; - bool init; - bool ro; - } map[] = { -#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \ - .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \ - .global = (G), .init = (I), .ro = (R) } -#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R)) - /* global init ro */ - _A( GRAPHICS, MAIN, false, true, false), - _B( PATCH, false, true, false), - _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false), - _B( PAGEPOOL, true, false, false), - _B( ATTRIBUTE_CB, true, false, false), - _B( RTV_CB_GLOBAL, true, false, false), - _B( FECS_EVENT, true, true, false), - _B( PRIV_ACCESS_MAP, true, true, true), -#undef _B -#undef _A - }; - u32 size = info->engineContextBuffersInfo[0].engine[i].size; - u8 align, page; - int id; - - for (id = 0; id < ARRAY_SIZE(map); id++) { - if (map[id].id0 == i) - break; - } - - nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i, - size, (id < ARRAY_SIZE(map)) ? "*" : ""); - if (id >= ARRAY_SIZE(map)) - continue; - - if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN) - size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */ - - if (size >= 1 << 21) page = 21; - else if (size >= 1 << 16) page = 16; - else page = 12; - - if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB) - align = order_base_2(size); - else - align = page; - - if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) - continue; - - gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1; - gr->ctxbuf[gr->ctxbuf_nr].size = size; - gr->ctxbuf[gr->ctxbuf_nr].page = page; - gr->ctxbuf[gr->ctxbuf_nr].align = align; - gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global; - gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init; - gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro; - gr->ctxbuf_nr++; - - if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) { - if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) - continue; - - gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1]; - gr->ctxbuf[gr->ctxbuf_nr].bufferId = - NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; - gr->ctxbuf_nr++; - } - } - - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); - - /* Promote golden context to RM. */ - ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan); - if (ret) - goto done; - - /* Allocate 3D class on channel to trigger golden context init in RM. 
*/ - { - int i; - - for (i = 0; gr->base.func->sclass[i].ctor; i++) { - if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) { - struct nvkm_gsp_object threed; - - ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000, - gr->base.func->sclass[i].oclass, 0, - &threed); - if (ret) - goto done; - - nvkm_gsp_rm_free(&threed); - break; - } - } - - if (WARN_ON(!gr->base.func->sclass[i].ctor)) { - ret = -EINVAL; - goto done; - } - } - -done: - nvkm_gsp_rm_free(&golden.chan); - for (int i = gr->ctxbuf_nr - 1; i >= 0; i--) - nvkm_vmm_put(golden.vmm, &golden.vma[i]); - nvkm_vmm_unref(&golden.vmm); - nvkm_memory_unref(&golden.inst); - return ret; - -} - -static void * -r535_gr_dtor(struct nvkm_gr *base) -{ - struct r535_gr *gr = r535_gr(base); - - while (gr->ctxbuf_nr) - nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]); - - kfree(gr->base.func); - return gr; -} - -int -r535_gr_new(const struct gf100_gr_func *hw, - struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) -{ - struct nvkm_gr_func *rm; - struct r535_gr *gr; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_gr_dtor; - rm->oneinit = r535_gr_oneinit; - rm->units = r535_gr_units; - rm->chan_new = r535_gr_chan_new; - - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_gr_obj_ctor; - } - - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - *pgr = &gr->base; - - return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild index 2b0e923cb755..5cc317abc42c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild @@ -5,5 +5,3 @@ nvkm-y += nvkm/engine/nvdec/tu102.o nvkm-y += nvkm/engine/nvdec/ga100.o nvkm-y += nvkm/engine/nvdec/ga102.o nvkm-y += nvkm/engine/nvdec/ad102.o - -nvkm-y += nvkm/engine/nvdec/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c deleted file mode 100644 index 75a24f3e6617..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include -#include -#include - -#include -#include - -struct r535_nvdec_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvdec_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvdec_obj = { - .dtor = r535_nvdec_obj_dtor, -}; - -static int -r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvdec_obj *obj; - NV_BSP_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_nvdec_dtor(struct nvkm_engine *engine) -{ - struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); - - kfree(nvdec->engine.func); - return nvdec; -} - -int -r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) -{ - struct nvkm_engine_func *rm; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvdec_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvdec_obj_ctor; - } - - if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild index 2c1495b730f3..3d71f2973dab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild @@ -4,5 +4,3 @@ nvkm-y += nvkm/engine/nvenc/gm107.o nvkm-y += nvkm/engine/nvenc/tu102.o nvkm-y += nvkm/engine/nvenc/ga102.o nvkm-y += nvkm/engine/nvenc/ad102.o - -nvkm-y += nvkm/engine/nvenc/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c deleted file mode 100644 index c8a2a9196ce5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include -#include -#include - -#include -#include - -struct r535_nvenc_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvenc_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvenc_obj = { - .dtor = r535_nvenc_obj_dtor, -}; - -static int -r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvenc_obj *obj; - NV_MSENC_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_nvenc_dtor(struct nvkm_engine *engine) -{ - struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); - - kfree(nvenc->engine.func); - return nvenc; -} - -int -r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) -{ - struct nvkm_engine_func *rm; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvenc_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvenc_obj_ctor; - } - - if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild index 1408f664add6..1d9bddd68605 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild @@ -1,5 +1,3 @@ # SPDX-License-Identifier: MIT 
nvkm-y += nvkm/engine/nvjpg/ga100.o nvkm-y += nvkm/engine/nvjpg/ad102.o - -nvkm-y += nvkm/engine/nvjpg/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c deleted file mode 100644 index 1babddc4eb80..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include -#include -#include - -#include -#include - -struct r535_nvjpg_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvjpg_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvjpg_obj = { - .dtor = r535_nvjpg_obj_dtor, -}; - -static int -r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvjpg_obj *obj; - NV_NVJPG_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_nvjpg_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvjpg_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvjpg_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild index 99f1713d7e51..3faf73b35f5a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild @@ -2,5 +2,3 @@ nvkm-y += nvkm/engine/ofa/ga100.o nvkm-y += nvkm/engine/ofa/ga102.o nvkm-y += nvkm/engine/ofa/ad102.o - -nvkm-y += nvkm/engine/ofa/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c deleted file mode 100644 index 438dc692eefe..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include -#include -#include -#include - -#include -#include - -struct r535_ofa_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_ofa_obj_dtor(struct nvkm_object *object) -{ - struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_ofa_obj = { - .dtor = r535_ofa_obj_dtor, -}; - -static int -r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_ofa_obj *obj; - NV_OFA_ALLOCATION_PARAMETERS *args; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); - if (WARN_ON(IS_ERR(args))) - return PTR_ERR(args); - - args->size = sizeof(*args); - - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); -} - -static void * -r535_ofa_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_ofa_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_ofa_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild index 9754c6872543..8faee3317a74 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild @@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o nvkm-y += nvkm/subdev/bar/gm107.o nvkm-y += nvkm/subdev/bar/gm20b.o nvkm-y += nvkm/subdev/bar/tu102.o - -nvkm-y += nvkm/subdev/bar/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c deleted file mode 100644 index 90186f98065c..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "gf100.h" - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static void -r535_bar_flush(struct nvkm_bar *bar) -{ - ioread32_native(bar->flushBAR2); -} - -static void -r535_bar_bar2_wait(struct nvkm_bar *base) -{ -} - -static int -r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) -{ - rpc_update_bar_pde_v15_00 *rpc; - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc)); - if (WARN_ON(IS_ERR_OR_NULL(rpc))) - return -EIO; - - rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2; - rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ - rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! - - return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); -} - -static void -r535_bar_bar2_fini(struct nvkm_bar *bar) -{ - struct nvkm_gsp *gsp = bar->subdev.device->gsp; - - bar->flushBAR2 = bar->flushBAR2PhysMode; - nvkm_done(bar->flushFBZero); - - WARN_ON(r535_bar_bar2_update_pde(gsp, 0)); -} - -static void -r535_bar_bar2_init(struct nvkm_bar *bar) -{ - struct nvkm_device *device = bar->subdev.device; - struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; - struct nvkm_gsp *gsp = device->gsp; - - WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr)); - vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb; - - if (!bar->flushFBZero) { - struct nvkm_memory *fbZero; - int ret; - - ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero); - if (ret == 0) { - ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero); - nvkm_memory_unref(&fbZero); - } - WARN_ON(ret); - } - - bar->bar2 = true; - bar->flushBAR2 = nvkm_kmap(bar->flushFBZero); - WARN_ON(!bar->flushBAR2); -} - -static void -r535_bar_bar1_wait(struct nvkm_bar *base) -{ -} - -static void -r535_bar_bar1_fini(struct nvkm_bar *base) -{ -} - -static void -r535_bar_bar1_init(struct nvkm_bar *bar) -{ - struct nvkm_device *device = bar->subdev.device; - struct nvkm_gsp *gsp = device->gsp; - struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm; - struct nvkm_memory *pd3; - int ret; - - ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3); - if (WARN_ON(ret)) - return; - - nvkm_memory_unref(&vmm->pd->pt[0]->memory); - - ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory); - nvkm_memory_unref(&pd3); - if (WARN_ON(ret)) - return; - - vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory); -} - -static void * -r535_bar_dtor(struct nvkm_bar *bar) -{ - void *data = gf100_bar_dtor(bar); - - nvkm_memory_unref(&bar->flushFBZero); - - if (bar->flushBAR2PhysMode) - iounmap(bar->flushBAR2PhysMode); - - kfree(bar->func); - return data; -} - -int -r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) -{ - struct nvkm_bar_func *rm; - struct nvkm_bar *bar; - int ret; - - if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_bar_dtor; - rm->oneinit = hw->oneinit; - rm->bar1.init = r535_bar_bar1_init; - rm->bar1.fini = r535_bar_bar1_fini; - rm->bar1.wait = r535_bar_bar1_wait; - rm->bar1.vmm = hw->bar1.vmm; - rm->bar2.init = r535_bar_bar2_init; - rm->bar2.fini = r535_bar_bar2_fini; - rm->bar2.wait = r535_bar_bar2_wait; - rm->bar2.vmm = hw->bar2.vmm; - rm->flush 
= r535_bar_flush; - - ret = gf100_bar_new_(rm, device, type, inst, &bar); - if (ret) { - kfree(rm); - return ret; - } - *pbar = bar; - - bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); - if (!bar->flushBAR2PhysMode) - return -ENOMEM; - - bar->flushBAR2 = bar->flushBAR2PhysMode; - - gf100_bar(*pbar)->bar2_halve = true; - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index af6e55603763..ba892c111c26 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -9,6 +9,4 @@ nvkm-y += nvkm/subdev/gsp/ga100.o nvkm-y += nvkm/subdev/gsp/ga102.o nvkm-y += nvkm/subdev/gsp/ad102.o -nvkm-y += nvkm/subdev/gsp/r535.o - include $(src)/nvkm/subdev/gsp/rm/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c deleted file mode 100644 index f42879b2ea7e..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ /dev/null @@ -1,2252 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include - -#include "priv.h" - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -extern struct dentry *nouveau_debugfs_root; - -const struct nvkm_gsp_rm -r535_gsp_rm = { - .api = &r535_rm, -}; - -static void -r535_gsp_msgq_work(struct work_struct *work) -{ - struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); - - mutex_lock(&gsp->cmdq.mutex); - if (*gsp->msgq.rptr != *gsp->msgq.wptr) - r535_gsp_msg_recv(gsp, 0, 0); - mutex_unlock(&gsp->cmdq.mutex); -} - -static irqreturn_t -r535_gsp_intr(struct nvkm_inth *inth) -{ - struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); - struct nvkm_subdev *subdev = &gsp->subdev; - u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); - u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + - gsp->falcon.func->riscv_irqmask); - u32 stat = intr & inte; - - if (!stat) { - nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); - return IRQ_NONE; - } - - if (stat & 0x00000040) { - nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); - schedule_work(&gsp->msgq.work); - stat &= ~0x00000040; - } - - if (stat) { - nvkm_error(subdev, "intr %08x\n", stat); - nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); - nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); - } - - nvkm_falcon_intr_retrigger(&gsp->falcon); - return IRQ_HANDLED; -} - -static int -r535_gsp_intr_get_table(struct nvkm_gsp *gsp) -{ - NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; - int ret = 0; - - ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); - if (WARN_ON(ret)) { - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - return ret; - } - - for (unsigned i = 0; i < ctrl->tableLen; i++) { - enum nvkm_subdev_type type; - int inst; - - nvkm_debug(&gsp->subdev, - "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, - ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, - ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); - - switch (ctrl->table[i].engineIdx) { - case MC_ENGINE_IDX_GSP: - type = NVKM_SUBDEV_GSP; - inst = 0; - break; - case MC_ENGINE_IDX_DISP: - type = NVKM_ENGINE_DISP; - inst = 0; - break; - case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: - type = NVKM_ENGINE_CE; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; - break; - case MC_ENGINE_IDX_GR0: - type = NVKM_ENGINE_GR; - inst = 0; - break; - case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: - type = NVKM_ENGINE_NVDEC; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; - break; - case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: - type = NVKM_ENGINE_NVENC; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; - break; - case MC_ENGINE_IDX_NVJPEG0 ... 
MC_ENGINE_IDX_NVJPEG7: - type = NVKM_ENGINE_NVJPG; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; - break; - case MC_ENGINE_IDX_OFA0: - type = NVKM_ENGINE_OFA; - inst = 0; - break; - default: - continue; - } - - if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { - ret = -ENOSPC; - break; - } - - gsp->intr[gsp->intr_nr].type = type; - gsp->intr[gsp->intr_nr].inst = inst; - gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; - gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; - gsp->intr_nr++; - } - - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - return ret; -} - -static int -r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) -{ - GspStaticConfigInfo *rpc; - int last_usable = -1; - - rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); - if (IS_ERR(rpc)) - return PTR_ERR(rpc); - - gsp->internal.client.object.client = &gsp->internal.client; - gsp->internal.client.object.parent = NULL; - gsp->internal.client.object.handle = rpc->hInternalClient; - gsp->internal.client.gsp = gsp; - - gsp->internal.device.object.client = &gsp->internal.client; - gsp->internal.device.object.parent = &gsp->internal.client.object; - gsp->internal.device.object.handle = rpc->hInternalDevice; - - gsp->internal.device.subdevice.client = &gsp->internal.client; - gsp->internal.device.subdevice.parent = &gsp->internal.device.object; - gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; - - gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; - gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; - - for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { - NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = - &rpc->fbRegionInfoParams.fbRegion[i]; - - nvkm_debug(&gsp->subdev, "fb region %d: " - "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, - reg->base, reg->limit, reg->reserved, reg->performance, - reg->supportCompressed, reg->supportISO, reg->bProtected); - - if (!reg->reserved && !reg->bProtected) { - if (reg->supportCompressed && reg->supportISO && - !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { - const u64 size = (reg->limit + 1) - reg->base; - - gsp->fb.region[gsp->fb.region_nr].addr = reg->base; - gsp->fb.region[gsp->fb.region_nr].size = size; - gsp->fb.region_nr++; - } - - last_usable = i; - } - } - - if (last_usable >= 0) { - u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; - - gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; - } - - for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { - if (rpc->gpcInfo.gpcMask & BIT(gpc)) { - gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); - gsp->gr.gpcs++; - } - } - - nvkm_gsp_rpc_done(gsp, rpc); - return 0; -} - -static void -nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem) -{ - if (mem->data) { - /* - * Poison the buffer to catch any unexpected access from - * GSP-RM if the buffer was prematurely freed. - */ - memset(mem->data, 0xFF, mem->size); - - dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr); - put_device(mem->dev); - - memset(mem, 0, sizeof(*mem)); - } -} - -/** - * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects - * @gsp: gsp pointer - * @size: number of bytes to allocate - * @mem: nvkm_gsp_mem object to initialize - * - * Allocates a block of memory for use with GSP. - * - * This memory block can potentially out-live the driver's remove() callback, - * so we take a device reference to ensure its lifetime. The reference is - * dropped in the destructor. 
- */ -static int -nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) -{ - mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); - if (WARN_ON(!mem->data)) - return -ENOMEM; - - mem->size = size; - mem->dev = get_device(gsp->subdev.device->dev); - - return 0; -} - -static int -r535_gsp_postinit(struct nvkm_gsp *gsp) -{ - struct nvkm_device *device = gsp->subdev.device; - int ret; - - ret = r535_gsp_rpc_get_gsp_static_info(gsp); - if (WARN_ON(ret)) - return ret; - - INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); - - ret = r535_gsp_intr_get_table(gsp); - if (WARN_ON(ret)) - return ret; - - ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); - if (WARN_ON(ret < 0)) - return ret; - - ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, - r535_gsp_intr, &gsp->subdev.inth); - if (WARN_ON(ret)) - return ret; - - nvkm_inth_allow(&gsp->subdev.inth); - nvkm_wr32(device, 0x110004, 0x00000040); - - /* Release the DMA buffers that were needed only for boot and init */ - nvkm_gsp_mem_dtor(&gsp->boot.fw); - nvkm_gsp_mem_dtor(&gsp->libos); - - return ret; -} - -static int -r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) -{ - rpc_unloading_guest_driver_v1F_07 *rpc; - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); - if (IS_ERR(rpc)) - return PTR_ERR(rpc); - - if (suspend) { - rpc->bInPMTransition = 1; - rpc->bGc6Entering = 0; - rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; - } else { - rpc->bInPMTransition = 0; - rpc->bGc6Entering = 0; - rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; - } - - return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); -} - -enum registry_type { - REGISTRY_TABLE_ENTRY_TYPE_DWORD = 1, /* 32-bit unsigned integer */ - REGISTRY_TABLE_ENTRY_TYPE_BINARY = 2, /* Binary blob */ - REGISTRY_TABLE_ENTRY_TYPE_STRING = 3, /* Null-terminated string */ -}; - -/* An arbitrary limit to the length of a registry key */ -#define REGISTRY_MAX_KEY_LENGTH 64 - -/** - * struct registry_list_entry - linked list member for a registry key/value - * @head: list_head struct - * @type: dword, binary, or string - * @klen: the length of name of the key - * @vlen: the length of the value - * @key: the key name - * @dword: the data, if REGISTRY_TABLE_ENTRY_TYPE_DWORD - * @binary: the data, if TYPE_BINARY or TYPE_STRING - * - * Every registry key/value is represented internally by this struct. - * - * Type DWORD is a simple 32-bit unsigned integer, and its value is stored in - * @dword. - * - * Types BINARY and STRING are variable-length binary blobs. The only real - * difference between BINARY and STRING is that STRING is null-terminated and - * is expected to contain only printable characters. - * - * Note: it is technically possible to have multiple keys with the same name - * but different types, but this is not useful since GSP-RM expects keys to - * have only one specific type. - */ -struct registry_list_entry { - struct list_head head; - enum registry_type type; - size_t klen; - char key[REGISTRY_MAX_KEY_LENGTH]; - size_t vlen; - u32 dword; /* TYPE_DWORD */ - u8 binary[] __counted_by(vlen); /* TYPE_BINARY or TYPE_STRING */ -}; - -/** - * add_registry -- adds a registry entry - * @gsp: gsp pointer - * @key: name of the registry key - * @type: type of data - * @data: pointer to value - * @length: size of data, in bytes - * - * Adds a registry key/value pair to the registry database. 
- * - * This function collects the registry information in a linked list. After - * all registry keys have been added, build_registry() is used to create the - * RPC data structure. - * - * registry_rpc_size is a running total of the size of all registry keys. - * It's used to avoid an O(n) calculation of the size when the RPC is built. - * - * Returns 0 on success, or negative error code on error. - */ -static int add_registry(struct nvkm_gsp *gsp, const char *key, - enum registry_type type, const void *data, size_t length) -{ - struct registry_list_entry *reg; - const size_t nlen = strnlen(key, REGISTRY_MAX_KEY_LENGTH) + 1; - size_t alloc_size; /* extra bytes to alloc for binary or string value */ - - if (nlen > REGISTRY_MAX_KEY_LENGTH) - return -EINVAL; - - alloc_size = (type == REGISTRY_TABLE_ENTRY_TYPE_DWORD) ? 0 : length; - - reg = kmalloc(sizeof(*reg) + alloc_size, GFP_KERNEL); - if (!reg) - return -ENOMEM; - - switch (type) { - case REGISTRY_TABLE_ENTRY_TYPE_DWORD: - reg->dword = *(const u32 *)(data); - break; - case REGISTRY_TABLE_ENTRY_TYPE_BINARY: - case REGISTRY_TABLE_ENTRY_TYPE_STRING: - memcpy(reg->binary, data, alloc_size); - break; - default: - nvkm_error(&gsp->subdev, "unrecognized registry type %u for '%s'\n", - type, key); - kfree(reg); - return -EINVAL; - } - - memcpy(reg->key, key, nlen); - reg->klen = nlen; - reg->vlen = length; - reg->type = type; - - list_add_tail(®->head, &gsp->registry_list); - gsp->registry_rpc_size += sizeof(PACKED_REGISTRY_ENTRY) + nlen + alloc_size; - - return 0; -} - -static int add_registry_num(struct nvkm_gsp *gsp, const char *key, u32 value) -{ - return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_DWORD, - &value, sizeof(u32)); -} - -static int add_registry_string(struct nvkm_gsp *gsp, const char *key, const char *value) -{ - return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_STRING, - value, strlen(value) + 1); -} - -/** - * build_registry -- create the registry RPC data - * @gsp: gsp pointer - * @registry: pointer to the RPC payload to fill - * - * After all registry key/value pairs have been added, call this function to - * build the RPC. - * - * The registry RPC looks like this: - * - * +-----------------+ - * |NvU32 size; | - * |NvU32 numEntries;| - * +-----------------+ - * +----------------------------------------+ - * |PACKED_REGISTRY_ENTRY | - * +----------------------------------------+ - * |Null-terminated key (string) for entry 0| - * +----------------------------------------+ - * |Binary/string data value for entry 0 | (only if necessary) - * +----------------------------------------+ - * - * +----------------------------------------+ - * |PACKED_REGISTRY_ENTRY | - * +----------------------------------------+ - * |Null-terminated key (string) for entry 1| - * +----------------------------------------+ - * |Binary/string data value for entry 1 | (only if necessary) - * +----------------------------------------+ - * ... (and so on, one copy for each entry) - * - * - * The 'data' field of an entry is either a 32-bit integer (for type DWORD) - * or an offset into the PACKED_REGISTRY_TABLE (for types BINARY and STRING). - * - * All memory allocated by add_registry() is released. 
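As a standalone sketch of the offset arithmetic described above: the entries are laid out back to back, followed by all key strings (and any binary/string values). The struct layouts below are stand-ins for PACKED_REGISTRY_TABLE/PACKED_REGISTRY_ENTRY, whose real definitions come from the GSP-RM headers, so the field order here is illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_registry_entry {            /* stand-in for PACKED_REGISTRY_ENTRY */
	uint32_t nameOffset;
	uint32_t type;
	uint32_t data;                  /* DWORD value, or offset of the value blob */
	uint32_t length;
};

struct fake_registry_table {            /* stand-in for PACKED_REGISTRY_TABLE */
	uint32_t size;
	uint32_t numEntries;
	struct fake_registry_entry entries[];
};

int main(void)
{
	const char *keys[] = { "RMSecBusResetEnable", "RMForcePcieConfigSave" };
	const uint32_t num = 2;
	uint32_t str_offset = sizeof(struct fake_registry_table) +
			      num * sizeof(struct fake_registry_entry);

	for (uint32_t i = 0; i < num; i++) {
		/* DWORD entries store their value inline, so only the key is appended. */
		printf("entry %u: nameOffset=%u\n", i, str_offset);
		str_offset += (uint32_t)strlen(keys[i]) + 1;   /* NUL-terminated key */
	}
	printf("total RPC size: %u bytes\n", str_offset);
	return 0;
}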
- */ -static void build_registry(struct nvkm_gsp *gsp, PACKED_REGISTRY_TABLE *registry) -{ - struct registry_list_entry *reg, *n; - size_t str_offset; - unsigned int i = 0; - - registry->numEntries = list_count_nodes(&gsp->registry_list); - str_offset = struct_size(registry, entries, registry->numEntries); - - list_for_each_entry_safe(reg, n, &gsp->registry_list, head) { - registry->entries[i].type = reg->type; - registry->entries[i].length = reg->vlen; - - /* Append the key name to the table */ - registry->entries[i].nameOffset = str_offset; - memcpy((void *)registry + str_offset, reg->key, reg->klen); - str_offset += reg->klen; - - switch (reg->type) { - case REGISTRY_TABLE_ENTRY_TYPE_DWORD: - registry->entries[i].data = reg->dword; - break; - case REGISTRY_TABLE_ENTRY_TYPE_BINARY: - case REGISTRY_TABLE_ENTRY_TYPE_STRING: - /* If the type is binary or string, also append the value */ - memcpy((void *)registry + str_offset, reg->binary, reg->vlen); - registry->entries[i].data = str_offset; - str_offset += reg->vlen; - break; - default: - break; - } - - i++; - list_del(®->head); - kfree(reg); - } - - /* Double-check that we calculated the sizes correctly */ - WARN_ON(gsp->registry_rpc_size != str_offset); - - registry->size = gsp->registry_rpc_size; -} - -/** - * clean_registry -- clean up registry memory in case of error - * @gsp: gsp pointer - * - * Call this function to clean up all memory allocated by add_registry() - * in case of error and build_registry() is not called. - */ -static void clean_registry(struct nvkm_gsp *gsp) -{ - struct registry_list_entry *reg, *n; - - list_for_each_entry_safe(reg, n, &gsp->registry_list, head) { - list_del(®->head); - kfree(reg); - } - - gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE); -} - -MODULE_PARM_DESC(NVreg_RegistryDwords, - "A semicolon-separated list of key=integer pairs of GSP-RM registry keys"); -static char *NVreg_RegistryDwords; -module_param(NVreg_RegistryDwords, charp, 0400); - -/* dword only */ -struct nv_gsp_registry_entries { - const char *name; - u32 value; -}; - -/* - * r535_registry_entries - required registry entries for GSP-RM - * - * This array lists registry entries that are required for GSP-RM to - * function correctly. - * - * RMSecBusResetEnable - enables PCI secondary bus reset - * RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration - * registers on any PCI reset. - */ -static const struct nv_gsp_registry_entries r535_registry_entries[] = { - { "RMSecBusResetEnable", 1 }, - { "RMForcePcieConfigSave", 1 }, -}; -#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) - -/** - * strip - strips all characters in 'reject' from 's' - * @s: string to strip - * @reject: string of characters to remove - * - * 's' is modified. - * - * Returns the length of the new string. - */ -static size_t strip(char *s, const char *reject) -{ - char *p = s, *p2 = s; - size_t length = 0; - char c; - - do { - while ((c = *p2) && strchr(reject, c)) - p2++; - - *p++ = c = *p2++; - length++; - } while (c); - - return length; -} - -/** - * r535_gsp_rpc_set_registry - build registry RPC and call GSP-RM - * @gsp: gsp pointer - * - * The GSP-RM registry is a set of key/value pairs that configure some aspects - * of GSP-RM. The keys are strings, and the values are 32-bit integers. - * - * The registry is built from a combination of a static hard-coded list (see - * above) and entries passed on the driver's command line. 
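For illustration, a user-space version of the key=value;key=value parsing performed by r535_gsp_rpc_set_registry() below, using strchr()/strtol() in place of the kernel's strsep()/kstrtol(); the keys are the two required entries listed above, and whitespace stripping is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "RMSecBusResetEnable=1;RMForcePcieConfigSave=0x1";
	char *p = buf;

	while (p && *p) {
		char *next = strchr(p, ';');
		if (next)
			*next++ = '\0';

		char *equal = strchr(p, '=');
		if (!equal || equal == p || equal[1] == '\0') {
			fprintf(stderr, "ignoring invalid entry '%s'\n", p);
		} else {
			char *end;
			long value;

			*equal = '\0';
			value = strtol(equal + 1, &end, 0);
			if (*end == '\0')
				printf("DWORD  key '%s' = %ld\n", p, value);
			else
				printf("STRING key '%s' = '%s'\n", p, equal + 1);
		}
		p = next;
	}
	return 0;
}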
- */ -static int -r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) -{ - PACKED_REGISTRY_TABLE *rpc; - unsigned int i; - int ret; - - INIT_LIST_HEAD(&gsp->registry_list); - gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE); - - for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { - ret = add_registry_num(gsp, r535_registry_entries[i].name, - r535_registry_entries[i].value); - if (ret) - goto fail; - } - - /* - * The NVreg_RegistryDwords parameter is a string of key=value - * pairs separated by semicolons. We need to extract and trim each - * substring, and then parse the substring to extract the key and - * value. - */ - if (NVreg_RegistryDwords) { - char *p = kstrdup(NVreg_RegistryDwords, GFP_KERNEL); - char *start, *next = p, *equal; - - if (!p) { - ret = -ENOMEM; - goto fail; - } - - /* Remove any whitespace from the parameter string */ - strip(p, " \t\n"); - - while ((start = strsep(&next, ";"))) { - long value; - - equal = strchr(start, '='); - if (!equal || equal == start || equal[1] == 0) { - nvkm_error(&gsp->subdev, - "ignoring invalid registry string '%s'\n", - start); - continue; - } - - /* Truncate the key=value string to just key */ - *equal = 0; - - ret = kstrtol(equal + 1, 0, &value); - if (!ret) { - ret = add_registry_num(gsp, start, value); - } else { - /* Not a number, so treat it as a string */ - ret = add_registry_string(gsp, start, equal + 1); - } - - if (ret) { - nvkm_error(&gsp->subdev, - "ignoring invalid registry key/value '%s=%s'\n", - start, equal + 1); - continue; - } - } - - kfree(p); - } - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, gsp->registry_rpc_size); - if (IS_ERR(rpc)) { - ret = PTR_ERR(rpc); - goto fail; - } - - build_registry(gsp, rpc); - - return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); - -fail: - clean_registry(gsp); - return ret; -} - -#if defined(CONFIG_ACPI) && defined(CONFIG_X86) -static void -r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) -{ - const guid_t NVOP_DSM_GUID = - GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, - 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); - u64 NVOP_DSM_REV = 0x00000100; - union acpi_object argv4 = { - .buffer.type = ACPI_TYPE_BUFFER, - .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), - }, *obj; - - caps->status = 0xffff; - - if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) - return; - - obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); - if (!obj) - return; - - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || - WARN_ON(obj->buffer.length != 4)) - return; - - caps->status = 0; - caps->optimusCaps = *(u32 *)obj->buffer.pointer; - - ACPI_FREE(obj); - - kfree(argv4.buffer.pointer); -} - -static void -r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) -{ - const guid_t JT_DSM_GUID = - GUID_INIT(0xCBECA351L, 0x067B, 0x4924, - 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); - u64 JT_DSM_REV = 0x00000103; - u32 caps; - union acpi_object argv4 = { - .buffer.type = ACPI_TYPE_BUFFER, - .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), - }, *obj; - - jt->status = 0xffff; - - obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); - if (!obj) - return; - - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || - WARN_ON(obj->buffer.length != 4)) - return; - - jt->status = 0; - jt->jtCaps = *(u32 *)obj->buffer.pointer; - jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; - jt->bSBIOSCaps = 0; - - ACPI_FREE(obj); - - kfree(argv4.buffer.pointer); -} - -static 
void -r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, - MUX_METHOD_DATA_ELEMENT *part) -{ - union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; - struct acpi_object_list input = { 1, &mux_arg }; - acpi_handle iter = NULL, handle_mux = NULL; - acpi_status status; - unsigned long long value; - - mode->status = 0xffff; - part->status = 0xffff; - - do { - status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); - if (ACPI_FAILURE(status) || !iter) - return; - - status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); - if (ACPI_FAILURE(status) || value != id) - continue; - - handle_mux = iter; - } while (!handle_mux); - - if (!handle_mux) - return; - - /* I -think- 0 means "acquire" according to nvidia's driver source */ - input.pointer->integer.type = ACPI_TYPE_INTEGER; - input.pointer->integer.value = 0; - - status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); - if (ACPI_SUCCESS(status)) { - mode->acpiId = id; - mode->mode = value; - mode->status = 0; - } - - status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); - if (ACPI_SUCCESS(status)) { - part->acpiId = id; - part->mode = value; - part->status = 0; - } -} - -static void -r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) -{ - mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); - - for (int i = 0; i < mux->tableLen; i++) { - r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], - &mux->acpiIdMuxPartTable[i]); - } -} - -static void -r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) -{ - acpi_status status; - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *_DOD; - - dod->status = 0xffff; - - status = acpi_evaluate_object(handle, "_DOD", NULL, &output); - if (ACPI_FAILURE(status)) - return; - - _DOD = output.pointer; - - if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || - WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) - return; - - for (int i = 0; i < _DOD->package.count; i++) { - if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) - return; - - dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; - dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); - } - - dod->status = 0; - kfree(output.pointer); -} -#endif - -static void -r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) -{ -#if defined(CONFIG_ACPI) && defined(CONFIG_X86) - acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); - - if (!handle) - return; - - acpi->bValid = 1; - - r535_gsp_acpi_dod(handle, &acpi->dodMethodData); - if (acpi->dodMethodData.status == 0) - r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); - - r535_gsp_acpi_jt(handle, &acpi->jtMethodData); - r535_gsp_acpi_caps(handle, &acpi->capsMethodData); -#endif -} - -static int -r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) -{ - struct nvkm_device *device = gsp->subdev.device; - struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); - GspSystemInfo *info; - - if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) - return -ENOSYS; - - info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); - if (IS_ERR(info)) - return PTR_ERR(info); - - info->gpuPhysAddr = device->func->resource_addr(device, 0); - info->gpuPhysFbAddr = device->func->resource_addr(device, 1); - info->gpuPhysInstAddr = device->func->resource_addr(device, 3); - info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); - info->maxUserVa = TASK_SIZE; - 
info->pciConfigMirrorBase = 0x088000; - info->pciConfigMirrorSize = 0x001000; - r535_gsp_acpi_info(gsp, &info->acpiMethodData); - - return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); -} - -static int -r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) -{ - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - rpc_os_error_log_v17_00 *msg = repv; - - if (WARN_ON(repc < sizeof(*msg))) - return -EINVAL; - - nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); - return 0; -} - -static int -r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) -{ - rpc_rc_triggered_v17_02 *msg = repv; - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvkm_chan *chan; - unsigned long flags; - - if (WARN_ON(repc < sizeof(*msg))) - return -EINVAL; - - nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", - msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, - msg->partitionAttributionId); - - chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid, &flags); - if (!chan) { - nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); - return 0; - } - - nvkm_chan_error(chan, false); - nvkm_chan_put(&chan, flags); - return 0; -} - -static int -r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) -{ - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - - WARN_ON(repc != 0); - - nvkm_error(subdev, "mmu fault queued\n"); - return 0; -} - -static int -r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) -{ - struct nvkm_gsp *gsp = priv; - struct nvkm_gsp_client *client; - struct nvkm_subdev *subdev = &gsp->subdev; - rpc_post_event_v17_00 *msg = repv; - - if (WARN_ON(repc < sizeof(*msg))) - return -EINVAL; - if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) - return -EINVAL; - - nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", - msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, - msg->status, msg->eventDataSize, msg->bNotifyList); - - mutex_lock(&gsp->client_id.mutex); - client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); - if (client) { - struct nvkm_gsp_event *event; - bool handled = false; - - list_for_each_entry(event, &client->events, head) { - if (event->object.handle == msg->hEvent) { - event->func(event, msg->eventData, msg->eventDataSize); - handled = true; - } - } - - if (!handled) { - nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", - msg->hClient, msg->hEvent); - } - } else { - nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); - } - mutex_unlock(&gsp->client_id.mutex); - return 0; -} - -/** - * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP - * @priv: gsp pointer - * @fn: function number (ignored) - * @repv: pointer to libos print RPC - * @repc: message size - * - * The GSP sequencer is a list of I/O commands that the GSP can send to - * the driver to perform for various purposes. The most common usage is to - * perform a special mid-initialization reset. 
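A minimal user-space model of how such a command buffer is walked: each opcode occupies one dword and is followed by a fixed number of payload dwords, and the parser advances by 1 plus the payload size. The opcode values and payload sizes used here are assumptions standing in for the GSP_SEQ_BUF_OPCODE_* values and GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS().

#include <stdint.h>
#include <stdio.h>

enum { OP_REG_WRITE = 0, OP_DELAY_US = 1 };  /* hypothetical opcode values */

static int payload_dwords(uint32_t op)
{
	switch (op) {
	case OP_REG_WRITE: return 2;   /* addr, val */
	case OP_DELAY_US:  return 1;   /* usec */
	default:           return -1;
	}
}

int main(void)
{
	/* opcode, payload..., opcode, payload... */
	const uint32_t cmds[] = { OP_REG_WRITE, 0x110004, 0x40, OP_DELAY_US, 100 };
	const int ndw = sizeof(cmds) / sizeof(cmds[0]);
	int ptr = 0;

	while (ptr < ndw) {
		uint32_t op = cmds[ptr];
		int len = payload_dwords(op);

		if (len < 0 || ptr + 1 + len > ndw) {
			fprintf(stderr, "malformed sequencer buffer\n");
			return 1;
		}
		if (op == OP_REG_WRITE)
			printf("wr32 %06x <- %08x\n",
			       (unsigned)cmds[ptr + 1], (unsigned)cmds[ptr + 2]);
		else
			printf("delay %u us\n", (unsigned)cmds[ptr + 1]);

		ptr += 1 + len;   /* same advance as the real parser */
	}
	return 0;
}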
- */ -static int -r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) -{ - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvkm_device *device = subdev->device; - rpc_run_cpu_sequencer_v17_00 *seq = repv; - int ptr = 0, ret; - - nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); - - while (ptr < seq->cmdIndex) { - GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; - - ptr += 1; - ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); - - switch (cmd->opCode) { - case GSP_SEQ_BUF_OPCODE_REG_WRITE: { - u32 addr = cmd->payload.regWrite.addr; - u32 data = cmd->payload.regWrite.val; - - nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); - nvkm_wr32(device, addr, data); - } - break; - case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { - u32 addr = cmd->payload.regModify.addr; - u32 mask = cmd->payload.regModify.mask; - u32 data = cmd->payload.regModify.val; - - nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); - nvkm_mask(device, addr, mask, data); - } - break; - case GSP_SEQ_BUF_OPCODE_REG_POLL: { - u32 addr = cmd->payload.regPoll.addr; - u32 mask = cmd->payload.regPoll.mask; - u32 data = cmd->payload.regPoll.val; - u32 usec = cmd->payload.regPoll.timeout ?: 4000000; - //u32 error = cmd->payload.regPoll.error; - - nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); - nvkm_rd32(device, addr); - nvkm_usec(device, usec, - if ((nvkm_rd32(device, addr) & mask) == data) - break; - ); - } - break; - case GSP_SEQ_BUF_OPCODE_DELAY_US: { - u32 usec = cmd->payload.delayUs.val; - - nvkm_trace(subdev, "seq usec %d\n", usec); - udelay(usec); - } - break; - case GSP_SEQ_BUF_OPCODE_REG_STORE: { - u32 addr = cmd->payload.regStore.addr; - u32 slot = cmd->payload.regStore.index; - - seq->regSaveArea[slot] = nvkm_rd32(device, addr); - nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, - seq->regSaveArea[slot]); - } - break; - case GSP_SEQ_BUF_OPCODE_CORE_RESET: - nvkm_trace(subdev, "seq core reset\n"); - nvkm_falcon_reset(&gsp->falcon); - nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); - nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); - break; - case GSP_SEQ_BUF_OPCODE_CORE_START: - nvkm_trace(subdev, "seq core start\n"); - if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) - nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); - else - nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); - break; - case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: - nvkm_trace(subdev, "seq core wait halt\n"); - nvkm_msec(device, 2000, - if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010) - break; - ); - break; - case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { - struct nvkm_sec2 *sec2 = device->sec2; - u32 mbox0; - - nvkm_trace(subdev, "seq core resume\n"); - - ret = gsp->func->reset(gsp); - if (WARN_ON(ret)) - return ret; - - nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); - nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); - - nvkm_falcon_start(&sec2->falcon); - - if (nvkm_msec(device, 2000, - if (nvkm_rd32(device, 0x1180f8) & 0x04000000) - break; - ) < 0) - return -ETIMEDOUT; - - mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040); - if (WARN_ON(mbox0)) { - nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); - return -EIO; - } - - nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); - - if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) - return -EIO; - } - break; - default: - nvkm_error(subdev, "unknown sequencer 
opcode %08x\n", cmd->opCode); - return -EINVAL; - } - } - - return 0; -} - -static int -r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) -{ - GspFwWprMeta *meta; - int ret; - - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); - if (ret) - return ret; - - meta = gsp->wpr_meta.data; - - meta->magic = GSP_FW_WPR_META_MAGIC; - meta->revision = GSP_FW_WPR_META_REVISION; - - meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; - meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; - - meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; - meta->sizeOfBootloader = gsp->boot.fw.size; - meta->bootloaderCodeOffset = gsp->boot.code_offset; - meta->bootloaderDataOffset = gsp->boot.data_offset; - meta->bootloaderManifestOffset = gsp->boot.manifest_offset; - - meta->sysmemAddrOfSignature = gsp->sig.addr; - meta->sizeOfSignature = gsp->sig.size; - - meta->gspFwRsvdStart = gsp->fb.heap.addr; - meta->nonWprHeapOffset = gsp->fb.heap.addr; - meta->nonWprHeapSize = gsp->fb.heap.size; - meta->gspFwWprStart = gsp->fb.wpr2.addr; - meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; - meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; - meta->gspFwOffset = gsp->fb.wpr2.elf.addr; - meta->bootBinOffset = gsp->fb.wpr2.boot.addr; - meta->frtsOffset = gsp->fb.wpr2.frts.addr; - meta->frtsSize = gsp->fb.wpr2.frts.size; - meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); - meta->fbSize = gsp->fb.size; - meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; - meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; - meta->bootCount = 0; - meta->partitionRpcAddr = 0; - meta->partitionRpcRequestOffset = 0; - meta->partitionRpcReplyOffset = 0; - meta->verified = 0; - return 0; -} - -static int -r535_gsp_shared_init(struct nvkm_gsp *gsp) -{ - struct { - msgqTxHeader tx; - msgqRxHeader rx; - } *cmdq, *msgq; - int ret, i; - - gsp->shm.cmdq.size = 0x40000; - gsp->shm.msgq.size = 0x40000; - - gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT; - gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); - gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); - - ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size + - gsp->shm.cmdq.size + - gsp->shm.msgq.size, - &gsp->shm.mem); - if (ret) - return ret; - - gsp->shm.ptes.ptr = gsp->shm.mem.data; - gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size; - gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; - - for (i = 0; i < gsp->shm.ptes.nr; i++) - gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT); - - cmdq = gsp->shm.cmdq.ptr; - cmdq->tx.version = 0; - cmdq->tx.size = gsp->shm.cmdq.size; - cmdq->tx.entryOff = GSP_PAGE_SIZE; - cmdq->tx.msgSize = GSP_PAGE_SIZE; - cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; - cmdq->tx.writePtr = 0; - cmdq->tx.flags = 1; - cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); - - msgq = gsp->shm.msgq.ptr; - - gsp->cmdq.cnt = cmdq->tx.msgCount; - gsp->cmdq.wptr = &cmdq->tx.writePtr; - gsp->cmdq.rptr = &msgq->rx.readPtr; - gsp->msgq.cnt = cmdq->tx.msgCount; - gsp->msgq.wptr = &msgq->tx.writePtr; - gsp->msgq.rptr = &cmdq->rx.readPtr; - return 0; -} - -int -r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) -{ - GSP_ARGUMENTS_CACHED *args; - int ret; - - if (!resume) { - ret = r535_gsp_shared_init(gsp); - if (ret) - return ret; - - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); - if (ret) - return ret; - } - - args = gsp->rmargs.data; - args->messageQueueInitArguments.sharedMemPhysAddr = 
gsp->shm.mem.addr; - args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; - args->messageQueueInitArguments.cmdQueueOffset = - (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; - args->messageQueueInitArguments.statQueueOffset = - (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; - - if (!resume) { - args->srInitArguments.oldLevel = 0; - args->srInitArguments.flags = 0; - args->srInitArguments.bInPMTransition = 0; - } else { - args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; - args->srInitArguments.flags = 0; - args->srInitArguments.bInPMTransition = 1; - } - - return 0; -} - -#ifdef CONFIG_DEBUG_FS - -/* - * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging - * debugfs entries will be deleted, and it will not be possible to debug the - * load failure. The keep_gsp_logging parameter tells Nouveau to copy the - * logging buffers to new debugfs entries, and these entries are retained - * until the driver unloads. - */ -static bool keep_gsp_logging; -module_param(keep_gsp_logging, bool, 0444); -MODULE_PARM_DESC(keep_gsp_logging, - "Migrate the GSP-RM logging debugfs entries upon exit"); - -/* - * GSP-RM uses a pseudo-class mechanism to define of a variety of per-"engine" - * data structures, and each engine has a "class ID" genererated by a - * pre-processor. This is the class ID for the PMU. - */ -#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722 - -/** - * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers - * @ucode_eng_desc: the engine descriptor - * @libos_print_buf_size: the size of the libos_print_buf[] - * @libos_print_buf: the actual buffer - * - * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance - * ID". We only care about messages from PMU. - */ -struct rpc_ucode_libos_print_v1e_08 { - u32 ucode_eng_desc; - u32 libos_print_buf_size; - u8 libos_print_buf[]; -}; - -/** - * r535_gsp_msg_libos_print - capture log message from the PMU - * @priv: gsp pointer - * @fn: function number (ignored) - * @repv: pointer to libos print RPC - * @repc: message size - * - * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC - * contains the contents of the libos print buffer from PMU. It is typically - * only written to when PMU encounters an error. - * - * Technically this RPC can be used to pass print buffers from any number of - * GSP-RM engines, but we only expect to receive them for the PMU. - * - * For the PMU, the buffer is 4K in size and the RPC always contains the full - * contents. 
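A quick standalone check of the descriptor split described above, bits 31:8 for the class ID and bits 7:0 for the instance ID, using the PMU class ID defined earlier in this file.

#include <stdint.h>
#include <stdio.h>

#define LIBOS_CLASS_PMU 0xf3d722u   /* NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU */

int main(void)
{
	uint32_t ucode_eng_desc = (LIBOS_CLASS_PMU << 8) | 0;   /* PMU, instance 0 */
	uint32_t class = ucode_eng_desc >> 8;
	uint32_t inst  = ucode_eng_desc & 0xff;

	printf("class 0x%x instance %u -> %s\n", (unsigned)class, (unsigned)inst,
	       class == LIBOS_CLASS_PMU ? "PMU print buffer" : "ignored");
	return 0;
}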
- */ -static int -r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc) -{ - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - struct rpc_ucode_libos_print_v1e_08 *rpc = repv; - unsigned int class = rpc->ucode_eng_desc >> 8; - - nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n", - class, rpc->libos_print_buf_size); - - if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) { - nvkm_warn(subdev, - "received libos print from unknown class 0x%x\n", - class); - return -ENOMSG; - } - - if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) { - nvkm_error(subdev, "libos print is too large (%u bytes)\n", - rpc->libos_print_buf_size); - return -E2BIG; - } - - memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size); - - return 0; -} - -/** - * create_debugfs - create a blob debugfs entry - * @gsp: gsp pointer - * @name: name of this dentry - * @blob: blob wrapper - * - * Creates a debugfs entry for a logging buffer with the name 'name'. - */ -static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name, - struct debugfs_blob_wrapper *blob) -{ - struct dentry *dent; - - dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob); - if (IS_ERR(dent)) { - nvkm_error(&gsp->subdev, - "failed to create %s debugfs entry\n", name); - return NULL; - } - - /* - * For some reason, debugfs_create_blob doesn't set the size of the - * dentry, so do that here. See [1] - * - * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/ - */ - i_size_write(d_inode(dent), blob->size); - - return dent; -} - -/** - * r535_gsp_libos_debugfs_init - create logging debugfs entries - * @gsp: gsp pointer - * - * Create the debugfs entries. This exposes the log buffers to userspace so - * that an external tool can parse it. - * - * The 'logpmu' contains exception dumps from the PMU. It is written via an - * RPC sent from GSP-RM and must be only 4KB. We create it here because it's - * only useful if there is a debugfs entry to expose it. If we get the PMU - * logging RPC and there is no debugfs entry, the RPC is just ignored. - * - * The blob_init, blob_rm, and blob_pmu objects can't be transient - * because debugfs_create_blob doesn't copy them. - * - * NOTE: OpenRM loads the logging elf image and prints the log messages - * in real-time. We may add that capability in the future, but that - * requires loading ELF images that are not distributed with the driver and - * adding the parsing code to Nouveau. - * - * Ideally, this should be part of nouveau_debugfs_init(), but that function - * is called too late. We really want to create these debugfs entries before - * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize, - * there could still be a log to capture. - */ -static void -r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp) -{ - struct device *dev = gsp->subdev.device->dev; - - /* Create a new debugfs directory with a name unique to this GPU. 
*/ - gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root); - if (IS_ERR(gsp->debugfs.parent)) { - nvkm_error(&gsp->subdev, - "failed to create %s debugfs root\n", dev_name(dev)); - return; - } - - gsp->blob_init.data = gsp->loginit.data; - gsp->blob_init.size = gsp->loginit.size; - gsp->blob_intr.data = gsp->logintr.data; - gsp->blob_intr.size = gsp->logintr.size; - gsp->blob_rm.data = gsp->logrm.data; - gsp->blob_rm.size = gsp->logrm.size; - - gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init); - if (!gsp->debugfs.init) - goto error; - - gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr); - if (!gsp->debugfs.intr) - goto error; - - gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm); - if (!gsp->debugfs.rm) - goto error; - - /* - * Since the PMU buffer is copied from an RPC, it doesn't need to be - * a DMA buffer. - */ - gsp->blob_pmu.size = GSP_PAGE_SIZE; - gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL); - if (!gsp->blob_pmu.data) - goto error; - - gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu); - if (!gsp->debugfs.pmu) { - kfree(gsp->blob_pmu.data); - goto error; - } - - i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size); - i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size); - i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size); - i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size); - - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, - r535_gsp_msg_libos_print, gsp); - - nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n"); - - if (keep_gsp_logging) { - nvkm_info(&gsp->subdev, - "logging buffers will be retained on failure\n"); - } - - return; - -error: - debugfs_remove(gsp->debugfs.parent); - gsp->debugfs.parent = NULL; -} - -#endif - -static inline u64 -r535_gsp_libos_id8(const char *name) -{ - u64 id = 0; - - for (int i = 0; i < sizeof(id) && *name; i++, name++) - id = (id << 8) | *name; - - return id; -} - -/** - * create_pte_array() - creates a PTE array of a physically contiguous buffer - * @ptes: pointer to the array - * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned) - * @size: size of the buffer - * - * GSP-RM sometimes expects physically-contiguous buffers to have an array of - * "PTEs" for each page in that buffer. Although in theory that allows for - * the buffer to be physically discontiguous, GSP-RM does not currently - * support that. - * - * In this case, the PTEs are DMA addresses of each page of the buffer. Since - * the buffer is physically contiguous, calculating all the PTEs is simple - * math. - * - * See memdescGetPhysAddrsForGpu() - */ -static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size) -{ - unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE); - unsigned int i; - - for (i = 0; i < num_pages; i++) - ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT); -} - -/** - * r535_gsp_libos_init() -- create the libos arguments structure - * @gsp: gsp pointer - * - * The logging buffers are byte queues that contain encoded printf-like - * messages from GSP-RM. They need to be decoded by a special application - * that can parse the buffers. - * - * The 'loginit' buffer contains logs from early GSP-RM init and - * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are - * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE. 
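A user-space sketch of the create_pte_array() helper above, assuming the 4 KiB GSP page size used throughout this file: for a physically contiguous buffer the "PTE" array is simply the DMA address of every page. In the real code each log buffer's array is written just past the 64-bit put pointer at offset 0, as described in the comment that follows.

#include <stdint.h>
#include <stdio.h>

#define GSP_PAGE_SHIFT 12
#define GSP_PAGE_SIZE  (1u << GSP_PAGE_SHIFT)

/* Same arithmetic as create_pte_array(): one entry per 4 KiB page. */
static void fill_pte_array(uint64_t *ptes, uint64_t addr, size_t size)
{
	size_t num_pages = (size + GSP_PAGE_SIZE - 1) / GSP_PAGE_SIZE;

	for (size_t i = 0; i < num_pages; i++)
		ptes[i] = addr + (i << GSP_PAGE_SHIFT);
}

int main(void)
{
	uint64_t ptes[4];

	fill_pte_array(ptes, 0x80000000ULL, 4 * GSP_PAGE_SIZE);
	for (int i = 0; i < 4; i++)
		printf("pte[%d] = 0x%llx\n", i, (unsigned long long)ptes[i]);
	return 0;
}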
- * - * The physical address map for the log buffer is stored in the buffer - * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp). - * Initially, pp is equal to 0. If the buffer has valid logging data in it, - * then pp points to index into the buffer where the next logging entry will - * be written. Therefore, the logging data is valid if: - * 1 <= pp < sizeof(buffer)/sizeof(u64) - * - * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is - * configured for a larger page size (e.g. 64K pages), we need to give - * the GSP an array of 4K pages. Fortunately, since the buffer is - * physically contiguous, it's simple math to calculate the addresses. - * - * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently - * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the - * buffers to be physically contiguous anyway. - * - * The memory allocated for the arguments must remain until the GSP sends the - * init_done RPC. - * - * See _kgspInitLibosLoggingStructures (allocates memory for buffers) - * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) - */ -static int -r535_gsp_libos_init(struct nvkm_gsp *gsp) -{ - LibosMemoryRegionInitArgument *args; - int ret; - - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); - if (ret) - return ret; - - args = gsp->libos.data; - - ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); - if (ret) - return ret; - - args[0].id8 = r535_gsp_libos_id8("LOGINIT"); - args[0].pa = gsp->loginit.addr; - args[0].size = gsp->loginit.size; - args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; - args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; - create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size); - - ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr); - if (ret) - return ret; - - args[1].id8 = r535_gsp_libos_id8("LOGINTR"); - args[1].pa = gsp->logintr.addr; - args[1].size = gsp->logintr.size; - args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; - args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; - create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size); - - ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm); - if (ret) - return ret; - - args[2].id8 = r535_gsp_libos_id8("LOGRM"); - args[2].pa = gsp->logrm.addr; - args[2].size = gsp->logrm.size; - args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; - args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; - create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size); - - ret = r535_gsp_rmargs_init(gsp, false); - if (ret) - return ret; - - args[3].id8 = r535_gsp_libos_id8("RMARGS"); - args[3].pa = gsp->rmargs.addr; - args[3].size = gsp->rmargs.size; - args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; - args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; - -#ifdef CONFIG_DEBUG_FS - r535_gsp_libos_debugfs_init(gsp); -#endif - - return 0; -} - -void -nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt) -{ - struct scatterlist *sgl; - int i; - - dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); - - for_each_sgtable_sg(sgt, sgl, i) { - struct page *page = sg_page(sgl); - - __free_page(page); - } - - sg_free_table(sgt); -} - -int -nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) -{ - const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE); - struct scatterlist *sgl; - int ret, i; - - ret = sg_alloc_table(sgt, pages, GFP_KERNEL); - if (ret) - return ret; - - for_each_sgtable_sg(sgt, sgl, i) { - struct page *page = alloc_page(GFP_KERNEL); - - if (!page) 
{ - nvkm_gsp_sg_free(device, sgt); - return -ENOMEM; - } - - sg_set_page(sgl, page, PAGE_SIZE, 0); - } - - ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); - if (ret) - nvkm_gsp_sg_free(device, sgt); - - return ret; -} - -static void -nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) -{ - nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2); - nvkm_gsp_mem_dtor(&rx3->lvl1); - nvkm_gsp_mem_dtor(&rx3->lvl0); -} - -/** - * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list - * @gsp: gsp pointer - * @sgt: S/G list to traverse - * @size: size of the image, in bytes - * @rx3: radix3 array to update - * - * The GSP uses a three-level page table, called radix3, to map the firmware. - * Each 64-bit "pointer" in the table is either the bus address of an entry in - * the next table (for levels 0 and 1) or the bus address of the next page in - * the GSP firmware image itself. - * - * Level 0 contains a single entry in one page that points to the first page - * of level 1. - * - * Level 1, since it's also only one page in size, contains up to 512 entries, - * one for each page in Level 2. - * - * Level 2 can be up to 512 pages in size, and each of those entries points to - * the next page of the firmware image. Since there can be up to 512*512 - * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB. - * - * Internally, the GSP has its window into system memory, but the base - * physical address of the aperture is not 0. In fact, it varies depending on - * the GPU architecture. Since the GPU is a PCI device, this window is - * accessed via DMA and is therefore bound by IOMMU translation. The end - * result is that GSP-RM must translate the bus addresses in the table to GSP - * physical addresses. All this should happen transparently. 
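To make the sizing above concrete, a standalone calculation of how many level-2 pages a firmware image needs and the resulting limit, assuming 4 KiB GSP pages (512 64-bit entries per page).

#include <stdint.h>
#include <stdio.h>

#define GSP_PAGE_SIZE   0x1000ULL              /* 4 KiB GSP pages */
#define PTES_PER_PAGE   (GSP_PAGE_SIZE / 8)    /* 512 64-bit entries per page */

int main(void)
{
	uint64_t image_size  = 64ULL << 20;        /* e.g. a 64 MiB firmware image */
	uint64_t image_pages = (image_size + GSP_PAGE_SIZE - 1) / GSP_PAGE_SIZE;
	uint64_t lvl2_pages  = (image_pages + PTES_PER_PAGE - 1) / PTES_PER_PAGE;

	printf("image pages: %llu, level-2 pages: %llu\n",
	       (unsigned long long)image_pages, (unsigned long long)lvl2_pages);

	/* Level 1 is a single page of 512 entries, so the hard limit is: */
	printf("max image size: %llu bytes (1 GiB)\n",
	       (unsigned long long)(PTES_PER_PAGE * PTES_PER_PAGE * GSP_PAGE_SIZE));
	return 0;
}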
- * - * Returns 0 on success, or negative error code - * - * See kgspCreateRadix3_IMPL - */ -static int -nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, - struct nvkm_gsp_radix3 *rx3) -{ - struct sg_dma_page_iter sg_dma_iter; - struct scatterlist *sg; - size_t bufsize; - u64 *pte; - int ret, i, page_idx = 0; - - ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0); - if (ret) - return ret; - - ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1); - if (ret) - goto lvl1_fail; - - // Allocate level 2 - bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); - ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2); - if (ret) - goto lvl2_fail; - - // Write the bus address of level 1 to level 0 - pte = rx3->lvl0.data; - *pte = rx3->lvl1.addr; - - // Write the bus address of each page in level 2 to level 1 - pte = rx3->lvl1.data; - for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0) - *pte++ = sg_page_iter_dma_address(&sg_dma_iter); - - // Finally, write the bus address of each page in sgt to level 2 - for_each_sgtable_sg(&rx3->lvl2, sg, i) { - void *sgl_end; - - pte = sg_virt(sg); - sgl_end = (void *)pte + sg->length; - - for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) { - *pte++ = sg_page_iter_dma_address(&sg_dma_iter); - page_idx++; - - // Go to the next scatterlist for level 2 if we've reached the end - if ((void *)pte >= sgl_end) - break; - } - } - - if (ret) { -lvl2_fail: - nvkm_gsp_mem_dtor(&rx3->lvl1); -lvl1_fail: - nvkm_gsp_mem_dtor(&rx3->lvl0); - } - - return ret; -} - -int -r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) -{ - int ret; - - if (suspend) { - GspFwWprMeta *meta = gsp->wpr_meta.data; - u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; - GspFwSRMeta *sr; - - ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); - if (ret) - return ret; - - ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); - if (ret) - return ret; - - ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta); - if (ret) - return ret; - - sr = gsp->sr.meta.data; - sr->magic = GSP_FW_SR_META_MAGIC; - sr->revision = GSP_FW_SR_META_REVISION; - sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; - sr->sizeOfSuspendResumeData = len; - } - - ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); - if (WARN_ON(ret)) - return ret; - - nvkm_msec(gsp->subdev.device, 2000, - if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000) - break; - ); - - gsp->running = false; - return 0; -} - -int -r535_gsp_init(struct nvkm_gsp *gsp) -{ - int ret; - - nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); - - if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) - return -EIO; - - ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); - if (ret) - goto done; - - gsp->running = true; - -done: - if (gsp->sr.meta.data) { - nvkm_gsp_mem_dtor(&gsp->sr.meta); - nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); - nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); - return ret; - } - - if (ret == 0) - ret = r535_gsp_postinit(gsp); - - return ret; -} - -static int -r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) -{ - const struct firmware *fw = gsp->fws.bl; - const struct nvfw_bin_hdr *hdr; - RM_RISCV_UCODE_DESC *desc; - int ret; - - hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); - desc = (void *)fw->data + hdr->header_offset; - - ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); - if (ret) - return ret; - - memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); - - gsp->boot.code_offset = 
desc->monitorCodeOffset; - gsp->boot.data_offset = desc->monitorDataOffset; - gsp->boot.manifest_offset = desc->manifestOffset; - gsp->boot.app_version = desc->appVersion; - return 0; -} - -static const struct nvkm_firmware_func -r535_gsp_fw = { - .type = NVKM_FIRMWARE_IMG_SGT, -}; - -static int -r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize) -{ - const u8 *img = gsp->fws.rm->data; - const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img; - const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff]; - const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset]; - - for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { - if (!strcmp(&names[shdr->sh_name], name)) { - *pdata = &img[shdr->sh_offset]; - *psize = shdr->sh_size; - return 0; - } - } - - nvkm_error(&gsp->subdev, "section '%s' not found\n", name); - return -ENOENT; -} - -#ifdef CONFIG_DEBUG_FS - -struct r535_gsp_log { - struct nvif_log log; - - /* - * Logging buffers in debugfs. The wrapper objects need to remain - * in memory until the dentry is deleted. - */ - struct dentry *debugfs_logging_dir; - struct debugfs_blob_wrapper blob_init; - struct debugfs_blob_wrapper blob_intr; - struct debugfs_blob_wrapper blob_rm; - struct debugfs_blob_wrapper blob_pmu; -}; - -/** - * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU - * @_log: nvif_log struct for this GPU - * - * Called when the driver is shutting down, to clean up the retained GSP-RM - * logging buffers. - */ -static void r535_debugfs_shutdown(struct nvif_log *_log) -{ - struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log); - - debugfs_remove(log->debugfs_logging_dir); - - kfree(log->blob_init.data); - kfree(log->blob_intr.data); - kfree(log->blob_rm.data); - kfree(log->blob_pmu.data); - - /* We also need to delete the list object */ - kfree(log); -} - -/** - * is_empty - return true if the logging buffer was never written to - * @b: blob wrapper with ->data field pointing to logging buffer - * - * The first 64-bit field of loginit, and logintr, and logrm is the 'put' - * pointer, and it is initialized to 0. It's a dword-based index into the - * circular buffer, indicating where the next printf write will be made. - * - * If the pointer is still 0 when GSP-RM is shut down, that means that the - * buffer was never written to, so it can be ignored. - * - * This test also works for logpmu, even though it doesn't have a put pointer. - */ -static bool is_empty(const struct debugfs_blob_wrapper *b) -{ - u64 *put = b->data; - - return put ? (*put == 0) : true; -} - -/** - * r535_gsp_copy_log - preserve the logging buffers in a blob - * @parent: the top-level dentry for this GPU - * @name: name of debugfs entry to create - * @s: original wrapper object to copy from - * @t: new wrapper object to copy to - * - * When GSP shuts down, the nvkm_gsp object and all its memory is deleted. - * To preserve the logging buffers, the buffers need to be copied, but only - * if they actually have data. 
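A standalone illustration of the put-pointer test implemented by is_empty() above: offset 0 of each logging buffer holds a 64-bit index that stays zero until GSP-RM writes its first entry.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as is_empty(): a zero put pointer means the buffer was never used. */
static bool log_is_empty(const void *buf)
{
	return buf ? (*(const uint64_t *)buf == 0) : true;
}

int main(void)
{
	uint64_t fresh[8192 / 8] = { 0 };      /* 8 KiB buffer, untouched */
	uint64_t used[8192 / 8]  = { 0 };

	used[0] = 42;                          /* GSP-RM has logged something */

	printf("fresh buffer empty: %d\n", log_is_empty(fresh));
	printf("used buffer empty:  %d\n", log_is_empty(used));
	return 0;
}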
- */ -static int r535_gsp_copy_log(struct dentry *parent, - const char *name, - const struct debugfs_blob_wrapper *s, - struct debugfs_blob_wrapper *t) -{ - struct dentry *dent; - void *p; - - if (is_empty(s)) - return 0; - - /* The original buffers will be deleted */ - p = kmemdup(s->data, s->size, GFP_KERNEL); - if (!p) - return -ENOMEM; - - t->data = p; - t->size = s->size; - - dent = debugfs_create_blob(name, 0444, parent, t); - if (IS_ERR(dent)) { - kfree(p); - memset(t, 0, sizeof(*t)); - return PTR_ERR(dent); - } - - i_size_write(d_inode(dent), t->size); - - return 0; -} - -/** - * r535_gsp_retain_logging - copy logging buffers to new debugfs root - * @gsp: gsp pointer - * - * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging - * buffers and their debugfs entries, but all those objects would normally - * deleted if GSP-RM fails to load. - * - * To preserve the logging buffers, we need to: - * - * 1) Allocate new buffers and copy the logs into them, so that the original - * DMA buffers can be released. - * - * 2) Preserve the directories. We don't need to save single dentries because - * we're going to delete the parent when the - * - * If anything fails in this process, then all the dentries need to be - * deleted. We don't need to deallocate the original logging buffers because - * the caller will do that regardless. - */ -static void r535_gsp_retain_logging(struct nvkm_gsp *gsp) -{ - struct device *dev = gsp->subdev.device->dev; - struct r535_gsp_log *log = NULL; - int ret; - - if (!keep_gsp_logging || !gsp->debugfs.parent) { - /* Nothing to do */ - goto exit; - } - - /* Check to make sure at least one buffer has data. */ - if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) && - is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_rm)) { - nvkm_warn(&gsp->subdev, "all logging buffers are empty\n"); - goto exit; - } - - log = kzalloc(sizeof(*log), GFP_KERNEL); - if (!log) - goto error; - - /* - * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper - * objects are also being deleted, which means the dentries will no - * longer be valid. Delete the existing entries so that we can create - * new ones with the same name. 
- */ - debugfs_remove(gsp->debugfs.init); - debugfs_remove(gsp->debugfs.intr); - debugfs_remove(gsp->debugfs.rm); - debugfs_remove(gsp->debugfs.pmu); - - ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init); - if (ret) - goto error; - - ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr); - if (ret) - goto error; - - ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm); - if (ret) - goto error; - - ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu); - if (ret) - goto error; - - /* The nvkm_gsp object is going away, so save the dentry */ - log->debugfs_logging_dir = gsp->debugfs.parent; - - log->log.shutdown = r535_debugfs_shutdown; - list_add(&log->log.entry, &gsp_logs.head); - - nvkm_warn(&gsp->subdev, - "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n", - dev_name(dev)); - - return; - -error: - nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n"); - -exit: - debugfs_remove(gsp->debugfs.parent); - - if (log) { - kfree(log->blob_init.data); - kfree(log->blob_intr.data); - kfree(log->blob_rm.data); - kfree(log->blob_pmu.data); - kfree(log); - } -} - -#endif - -/** - * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown - * @gsp: gsp pointer - * - * If the log buffers are exposed via debugfs, the data for those entries - * needs to be cleaned up when the GSP device shuts down. - */ -static void -r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp) -{ -#ifdef CONFIG_DEBUG_FS - r535_gsp_retain_logging(gsp); - - /* - * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that - * exists only if the debugfs entries were created. - */ - kfree(gsp->blob_pmu.data); - gsp->blob_pmu.data = NULL; -#endif -} - -void -r535_gsp_dtor(struct nvkm_gsp *gsp) -{ - idr_destroy(&gsp->client_id.idr); - mutex_destroy(&gsp->client_id.mutex); - - nvkm_gsp_radix3_dtor(gsp, &gsp->radix3); - nvkm_gsp_mem_dtor(&gsp->sig); - nvkm_firmware_dtor(&gsp->fw); - - nvkm_falcon_fw_dtor(&gsp->booter.unload); - nvkm_falcon_fw_dtor(&gsp->booter.load); - - mutex_destroy(&gsp->msgq.mutex); - mutex_destroy(&gsp->cmdq.mutex); - - nvkm_gsp_dtor_fws(gsp); - - nvkm_gsp_mem_dtor(&gsp->rmargs); - nvkm_gsp_mem_dtor(&gsp->wpr_meta); - nvkm_gsp_mem_dtor(&gsp->shm.mem); - - r535_gsp_libos_debugfs_fini(gsp); - - nvkm_gsp_mem_dtor(&gsp->loginit); - nvkm_gsp_mem_dtor(&gsp->logintr); - nvkm_gsp_mem_dtor(&gsp->logrm); -} - -int -r535_gsp_oneinit(struct nvkm_gsp *gsp) -{ - struct nvkm_device *device = gsp->subdev.device; - const u8 *data; - u64 size; - int ret; - - mutex_init(&gsp->cmdq.mutex); - mutex_init(&gsp->msgq.mutex); - - /* Load GSP firmware from ELF image into DMA-accessible memory. */ - ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); - if (ret) - return ret; - - ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw); - if (ret) - return ret; - - /* Load relevant signature from ELF image. */ - ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size); - if (ret) - return ret; - - ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig); - if (ret) - return ret; - - memcpy(gsp->sig.data, data, size); - - /* Build radix3 page table for ELF image. 
*/ - ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); - if (ret) - return ret; - - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, - r535_gsp_msg_run_cpu_sequencer, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, - r535_gsp_msg_rc_triggered, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, - r535_gsp_msg_mmu_fault_queued, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); - ret = r535_gsp_rm_boot_ctor(gsp); - if (ret) - return ret; - - /* Release FW images - we've copied them to DMA buffers now. */ - nvkm_gsp_dtor_fws(gsp); - - /* Calculate FB layout. */ - gsp->fb.wpr2.frts.size = 0x100000; - gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; - - gsp->fb.wpr2.boot.size = gsp->boot.fw.size; - gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); - - gsp->fb.wpr2.elf.size = gsp->fw.len; - gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); - - { - u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); - - gsp->fb.wpr2.heap.size = - gsp->func->wpr_heap.os_carveout_size + - gsp->func->wpr_heap.base_size + - ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + - ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); - - gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); - } - - gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); - gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); - - gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); - gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; - - gsp->fb.heap.size = 0x100000; - gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; - - ret = nvkm_gsp_fwsec_frts(gsp); - if (WARN_ON(ret)) - return ret; - - ret = r535_gsp_libos_init(gsp); - if (WARN_ON(ret)) - return ret; - - ret = r535_gsp_wpr_meta_init(gsp); - if (WARN_ON(ret)) - return ret; - - ret = r535_gsp_rpc_set_system_info(gsp); - if (WARN_ON(ret)) - return ret; - - ret = r535_gsp_rpc_set_registry(gsp); - if (WARN_ON(ret)) - return ret; - - mutex_init(&gsp->client_id.mutex); - idr_init(&gsp->client_id.idr); - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild index d50f2c351d93..a5f6b2abfd33 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild @@ -3,8 +3,23 @@ # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
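A side note on the WPR2 sizing in r535_gsp_oneinit above (just before this Kbuild hunk): the heap grows with framebuffer size as a fixed OS carveout plus a base allocation, plus a per-GiB term and a client-allocation term that are each rounded up to 1 MiB, and the result is never allowed to drop below the chip's minimum heap size. A minimal worked sketch of that arithmetic, with purely illustrative constants (the real GSP_FW_HEAP_* values and per-chip wpr_heap numbers are not reproduced here):

#include <stdio.h>
#include <stdint.h>

#define MIB (1ull << 20)
#define GIB (1ull << 30)

/* Round x up to the next multiple of a (a must be a power of two). */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Mirrors the shape of the heap-size computation; all constants are invented. */
static uint64_t wpr2_heap_size(uint64_t fb_bytes,
			       uint64_t os_carveout, uint64_t base,
			       uint64_t per_gb, uint64_t client_alloc,
			       uint64_t min_size)
{
	uint64_t fb_gb = (fb_bytes + GIB - 1) / GIB;	/* DIV_ROUND_UP */
	uint64_t size = os_carveout + base +
			align_up(per_gb * fb_gb, MIB) +
			align_up(client_alloc, MIB);

	return size > min_size ? size : min_size;
}

int main(void)
{
	/* Hypothetical 8 GiB board with made-up tuning values. */
	uint64_t size = wpr2_heap_size(8 * GIB,
				       4 * MIB,		/* os_carveout_size */
				       16 * MIB,	/* base_size */
				       8 * MIB,		/* per-GiB term */
				       32 * MIB,	/* client allocations */
				       64 * MIB);	/* chip minimum */

	printf("illustrative WPR2 heap: %llu MiB\n",
	       (unsigned long long)(size / MIB));
	return 0;
}
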
nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o +nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o nvkm-y += nvkm/subdev/gsp/rm/r535/client.o nvkm-y += nvkm/subdev/gsp/rm/r535/device.o + +nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o +nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o +nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o + +nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o + +nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o +nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o +nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o +nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o +nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o +nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o +nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c new file mode 100644 index 000000000000..ce2c86c159b5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c @@ -0,0 +1,185 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static void +r535_bar_flush(struct nvkm_bar *bar) +{ + ioread32_native(bar->flushBAR2); +} + +static void +r535_bar_bar2_wait(struct nvkm_bar *base) +{ +} + +static int +r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) +{ + rpc_update_bar_pde_v15_00 *rpc; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc)); + if (WARN_ON(IS_ERR_OR_NULL(rpc))) + return -EIO; + + rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2; + rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ + rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! 
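	/*
	 * Note on the encoding above (the exact field layout is an assumption
	 * based on the new-MMU PD entry format, as the XXX comment hints):
	 * shifting the 4KiB-aligned page-directory address right by 4 places
	 * it in the entry's address field, the OR with 2 appears to select
	 * the video-memory aperture, and entryLevelShift = 47 marks this as
	 * the top-level (PD3) entry, so GSP-RM points its BAR2 page directory
	 * at the one NVKM manages (and back at nothing on fini below).
	 */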
+ + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); +} + +static void +r535_bar_bar2_fini(struct nvkm_bar *bar) +{ + struct nvkm_gsp *gsp = bar->subdev.device->gsp; + + bar->flushBAR2 = bar->flushBAR2PhysMode; + nvkm_done(bar->flushFBZero); + + WARN_ON(r535_bar_bar2_update_pde(gsp, 0)); +} + +static void +r535_bar_bar2_init(struct nvkm_bar *bar) +{ + struct nvkm_device *device = bar->subdev.device; + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; + struct nvkm_gsp *gsp = device->gsp; + + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr)); + vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb; + + if (!bar->flushFBZero) { + struct nvkm_memory *fbZero; + int ret; + + ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero); + if (ret == 0) { + ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero); + nvkm_memory_unref(&fbZero); + } + WARN_ON(ret); + } + + bar->bar2 = true; + bar->flushBAR2 = nvkm_kmap(bar->flushFBZero); + WARN_ON(!bar->flushBAR2); +} + +static void +r535_bar_bar1_wait(struct nvkm_bar *base) +{ +} + +static void +r535_bar_bar1_fini(struct nvkm_bar *base) +{ +} + +static void +r535_bar_bar1_init(struct nvkm_bar *bar) +{ + struct nvkm_device *device = bar->subdev.device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm; + struct nvkm_memory *pd3; + int ret; + + ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3); + if (WARN_ON(ret)) + return; + + nvkm_memory_unref(&vmm->pd->pt[0]->memory); + + ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory); + nvkm_memory_unref(&pd3); + if (WARN_ON(ret)) + return; + + vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory); +} + +static void * +r535_bar_dtor(struct nvkm_bar *bar) +{ + void *data = gf100_bar_dtor(bar); + + nvkm_memory_unref(&bar->flushFBZero); + + if (bar->flushBAR2PhysMode) + iounmap(bar->flushBAR2PhysMode); + + kfree(bar->func); + return data; +} + +int +r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) +{ + struct nvkm_bar_func *rm; + struct nvkm_bar *bar; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_bar_dtor; + rm->oneinit = hw->oneinit; + rm->bar1.init = r535_bar_bar1_init; + rm->bar1.fini = r535_bar_bar1_fini; + rm->bar1.wait = r535_bar_bar1_wait; + rm->bar1.vmm = hw->bar1.vmm; + rm->bar2.init = r535_bar_bar2_init; + rm->bar2.fini = r535_bar_bar2_fini; + rm->bar2.wait = r535_bar_bar2_wait; + rm->bar2.vmm = hw->bar2.vmm; + rm->flush = r535_bar_flush; + + ret = gf100_bar_new_(rm, device, type, inst, &bar); + if (ret) { + kfree(rm); + return ret; + } + *pbar = bar; + + bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); + if (!bar->flushBAR2PhysMode) + return -ENOMEM; + + bar->flushBAR2 = bar->flushBAR2PhysMode; + + gf100_bar(*pbar)->bar2_halve = true; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c new file mode 100644 index 000000000000..0d73906f4a5a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c @@ -0,0 +1,108 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include + +#include +#include +#include + +struct r535_ce_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_ce_obj_dtor(struct nvkm_object *object) +{ + struct r535_ce_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_ce_obj = { + .dtor = r535_ce_obj_dtor, +}; + +static int +r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_ce_obj *obj; + NVC0B5_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->version = 1; + args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_ce_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) +{ + struct nvkm_engine_func *rm; + int nclass, ret; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_ce_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_ce_obj_ctor; + } + + ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c new file mode 100644 index 000000000000..1aae15167249 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -0,0 +1,1725 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static u64 +r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize) +{ + switch (chan->object.oclass & 0xff) { + case 0x7d: *psize = 0x10000; return 0x680000; + case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize); + case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize); + case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize); + default: + BUG_ON(1); + break; + } + + return 0ULL; +} + +static void +r535_chan_intr(struct nvkm_disp_chan *chan, bool en) +{ +} + +static void +r535_chan_fini(struct nvkm_disp_chan *chan) +{ + nvkm_gsp_rm_free(&chan->rm.object); +} + +static int +r535_chan_push(struct nvkm_disp_chan *chan) +{ + struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp; + NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + if (chan->memory) { + switch (nvkm_memory_target(chan->memory)) { + case NVKM_MEM_TARGET_NCOH: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 0; + break; + case NVKM_MEM_TARGET_HOST: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 1; + break; + case NVKM_MEM_TARGET_VRAM: + ctrl->addressSpace = ADDR_FBMEM; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + ctrl->physicalAddr = nvkm_memory_addr(chan->memory); + ctrl->limit = nvkm_memory_size(chan->memory) - 1; + } + + ctrl->hclass = chan->object.oclass; + ctrl->channelInstance = chan->head; + ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 
1 : 0; + + return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +} + +static int +r535_curs_init(struct nvkm_disp_chan *chan) +{ + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args; + int ret; + + ret = r535_chan_push(chan); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, + (chan->object.oclass << 16) | chan->head, + chan->object.oclass, sizeof(*args), &chan->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = chan->head; + + return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); +} + +static const struct nvkm_disp_chan_func +r535_curs_func = { + .init = r535_curs_init, + .fini = r535_chan_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, +}; + +static const struct nvkm_disp_chan_user +r535_curs = { + .func = &r535_curs_func, + .user = 73, +}; + +static int +r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle) +{ + return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle, + chan->chid.user << 25 | + (chan->disp->rm.client.object.handle & 0x3fff)); +} + +static void +r535_dmac_fini(struct nvkm_disp_chan *chan) +{ + struct nvkm_device *device = chan->disp->engine.subdev.device; + const u32 uoff = (chan->chid.user - 1) * 0x1000; + + chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff); + r535_chan_fini(chan); +} + +static int +r535_dmac_init(struct nvkm_disp_chan *chan) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; + int ret; + + ret = r535_chan_push(chan); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, + (chan->object.oclass << 16) | chan->head, + chan->object.oclass, sizeof(*args), &chan->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = chan->head; + args->offset = chan->suspend_put; + + return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); +} + +static int +r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory) +{ + chan->memory = nvkm_umem_search(chan->object.client, memory); + if (IS_ERR(chan->memory)) + return PTR_ERR(chan->memory); + + return 0; +} + +static const struct nvkm_disp_chan_func +r535_dmac_func = { + .push = r535_dmac_push, + .init = r535_dmac_init, + .fini = r535_dmac_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, + .bind = r535_dmac_bind, +}; + +static const struct nvkm_disp_chan_func +r535_wimm_func = { + .push = r535_dmac_push, + .init = r535_dmac_init, + .fini = r535_dmac_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, +}; + +static const struct nvkm_disp_chan_user +r535_wimm = { + .func = &r535_wimm_func, + .user = 33, +}; + +static const struct nvkm_disp_chan_user +r535_wndw = { + .func = &r535_dmac_func, + .user = 1, +}; + +static void +r535_core_fini(struct nvkm_disp_chan *chan) +{ + struct nvkm_device *device = chan->disp->engine.subdev.device; + + chan->suspend_put = nvkm_rd32(device, 0x680000); + r535_chan_fini(chan); +} + +static const struct nvkm_disp_chan_func +r535_core_func = { + .push = r535_dmac_push, + .init = r535_dmac_init, + .fini = r535_core_fini, + .intr = r535_chan_intr, + .user = r535_chan_user, + .bind = r535_dmac_bind, +}; + +static const struct nvkm_disp_chan_user +r535_core = { + .func = &r535_core_func, + .user = 0, +}; + +static int +r535_sor_bl_set(struct nvkm_ior *sor, int lvl) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + sizeof(*ctrl)); + if 
(IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->brightness = lvl; + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_sor_bl_get(struct nvkm_ior *sor) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + int ret, lvl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(sor->asy.outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + lvl = ctrl->brightness; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return lvl; +} + +static const struct nvkm_ior_func_bl +r535_sor_bl = { + .get = r535_sor_bl_get, + .set = r535_sor_bl_set, +}; + +static void +r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl; + + if (WARN_ON(size > sizeof(ctrl->bufferELD))) + return; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->numELDSize = size; + memcpy(ctrl->bufferELD, data, size); + ctrl->maxFreqSupported = 0; //XXX + ctrl->ctrl = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE); + ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE); + ctrl->deviceEntry = head; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl; + + if (present) + return; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->deviceEntry = head; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static const struct nvkm_ior_func_hda +r535_sor_hda = { + .hpd = r535_sor_hda_hpd, + .eld = r535_sor_hda_eld, +}; + +static void +r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->mute = mute; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl; + + if (!enable) + r535_sor_dp_audio_mute(sor, true); + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->enable = enable; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); + + if (enable) + r535_sor_dp_audio_mute(sor, false); +} + +static void +r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn) +{ + struct nvkm_disp *disp = sor->disp; + struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + 
NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + ctrl->sorIndex = sor->id; + ctrl->dpLink = sor->asy.link == 2; + ctrl->bEnableOverride = 1; + ctrl->bMST = 1; + ctrl->hBlankSym = 0; + ctrl->vBlankSym = 0; + ctrl->colorFormat = 0; + ctrl->bEnableTwoHeadOneOr = 0; + ctrl->singleHeadMultistreamMode = 0; + ctrl->MST.slotStart = slot; + ctrl->MST.slotEnd = slot + slot_nr - 1; + ctrl->MST.PBN = pbn; + ctrl->MST.Timeslice = aligned_pbn; + ctrl->MST.sendACT = 0; + ctrl->MST.singleHeadMSTPipeline = 0; + ctrl->MST.bEnableAudioOverRightPanel = 0; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static int +r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef, + u32 watermark, u32 hblanksym, u32 vblanksym) +{ + struct nvkm_disp *disp = sor->disp; + struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + ctrl->sorIndex = sor->id; + ctrl->dpLink = sor->asy.link == 2; + ctrl->bEnableOverride = 1; + ctrl->bMST = 0; + ctrl->hBlankSym = hblanksym; + ctrl->vBlankSym = vblanksym; + ctrl->colorFormat = 0; + ctrl->bEnableTwoHeadOneOr = 0; + ctrl->SST.bEnhancedFraming = ef; + ctrl->SST.tuSize = 64; + ctrl->SST.waterMark = watermark; + ctrl->SST.bEnableAudioOverRightPanel = 0; + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static const struct nvkm_ior_func_dp +r535_sor_dp = { + .sst = r535_sor_dp_sst, + .vcpi = r535_sor_dp_vcpi, + .audio = r535_sor_dp_audio, +}; + +static void +r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling, + bool scrambling_low_rates) +{ + struct nvkm_outp *outp = sor->asy.outp; + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(outp->index); + ctrl->caps = 0; + if (support) + ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE); + if (scrambling) + ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE); + if (scrambling_low_rates) + ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE); + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(outp->index); + ctrl->mute = mute; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(outp->index); + ctrl->transmitControl = + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) | + NVDEF(NV0073_CTRL_SPECIFIC, 
SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) | + NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO); + ctrl->packetSize = 10; + ctrl->aPacket[0] = 0x03; + ctrl->aPacket[1] = 0x00; + ctrl->aPacket[2] = 0x00; + ctrl->aPacket[3] = enable ? 0x10 : 0x01; + ctrl->aPacket[4] = 0x00; + ctrl->aPacket[5] = 0x00; + ctrl->aPacket[6] = 0x00; + ctrl->aPacket[7] = 0x00; + ctrl->aPacket[8] = 0x00; + ctrl->aPacket[9] = 0x00; + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static void +r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable) +{ + struct nvkm_device *device = sor->disp->engine.subdev.device; + const u32 hdmi = head * 0x400; + + r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable); + r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable); + + /* General Control (GCP). */ + nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000); + nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010); + nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001); +} + +static void +r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey) +{ + struct nvkm_disp *disp = sor->disp; + NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl; + + if (!enable) + return; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return; + + ctrl->displayId = BIT(sor->asy.outp->index); + ctrl->enable = enable; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl)); +} + +static const struct nvkm_ior_func_hdmi +r535_sor_hdmi = { + .ctrl = r535_sor_hdmi_ctrl, + .scdc = r535_sor_hdmi_scdc, + /*TODO: SF_USER -> KMS. 
*/ + .infoframe_avi = gv100_sor_hdmi_infoframe_avi, + .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi, + .audio = r535_sor_hdmi_audio, +}; + +static const struct nvkm_ior_func +r535_sor = { + .hdmi = &r535_sor_hdmi, + .dp = &r535_sor_dp, + .hda = &r535_sor_hda, + .bl = &r535_sor_bl, +}; + +static int +r535_sor_new(struct nvkm_disp *disp, int id) +{ + return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/); +} + +static int +r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask) +{ + *pmask = 0xf; + return 4; +} + +static void +r535_head_vblank_put(struct nvkm_head *head) +{ + struct nvkm_device *device = head->disp->engine.subdev.device; + + nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000); +} + +static void +r535_head_vblank_get(struct nvkm_head *head) +{ + struct nvkm_device *device = head->disp->engine.subdev.device; + + nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002); + nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002); +} + +static void +r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state) +{ +} + +static const struct nvkm_head_func +r535_head = { + .state = r535_head_state, + .vblank_get = r535_head_vblank_get, + .vblank_put = r535_head_vblank_put, +}; + +static struct nvkm_conn * +r535_conn_new(struct nvkm_disp *disp, u32 id) +{ + NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl; + struct nvbios_connE dcbE = {}; + struct nvkm_conn *conn; + int ret, index; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return (void *)ctrl; + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(id); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ERR_PTR(ret); + } + + list_for_each_entry(conn, &disp->conns, head) { + if (conn->index == ctrl->data[0].index) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return conn; + } + } + + dcbE.type = ctrl->data[0].type; + index = ctrl->data[0].index; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + ret = nvkm_conn_new(disp, index, &dcbE, &conn); + if (ret) + return ERR_PTR(ret); + + list_add_tail(&conn->head, &disp->conns); + return conn; +} + +static void +r535_outp_release(struct nvkm_outp *outp) +{ + outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id); + outp->ior->asy.outp = NULL; + outp->ior = NULL; +} + +static int +r535_outp_acquire(struct nvkm_outp *outp, bool hda) +{ + struct nvkm_disp *disp = outp->disp; + struct nvkm_ior *ior; + NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl; + int ret, or; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->sorExcludeMask = disp->rm.assigned_sors; + if (hda) + ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) { + if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) { + disp->rm.assigned_sors |= BIT(or); + break; + } + } + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag))) + return -EINVAL; + + ior = nvkm_ior_find(disp, SOR, or); + if (WARN_ON(!ior)) + return -EINVAL; + 
+ nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior); + return 0; +} + +static int +r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) +{ + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + *displayid = ctrl->displayId; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static struct nvkm_ior * +r535_outp_inherit(struct nvkm_outp *outp) +{ + struct nvkm_disp *disp = outp->disp; + struct nvkm_head *head; + u32 displayid; + int ret; + + list_for_each_entry(head, &disp->heads, head) { + ret = r535_disp_head_displayid(disp, head->id, &displayid); + if (WARN_ON(ret)) + return NULL; + + if (displayid == BIT(outp->index)) { + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; + u32 id, proto; + struct nvkm_ior *ior; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return NULL; + + ctrl->subDeviceInstance = 0; + ctrl->displayId = displayid; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return NULL; + } + + id = ctrl->index; + proto = ctrl->protocol; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + ior = nvkm_ior_find(disp, SOR, id); + if (WARN_ON(!ior)) + return NULL; + + switch (proto) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + ior->arm.proto = TMDS; + ior->arm.link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + ior->arm.proto = TMDS; + ior->arm.link = 2; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + ior->arm.proto = TMDS; + ior->arm.link = 3; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + ior->arm.proto = DP; + ior->arm.link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + ior->arm.proto = DP; + ior->arm.link = 2; + break; + default: + WARN_ON(1); + return NULL; + } + + ior->arm.proto_evo = proto; + ior->arm.head = BIT(head->id); + disp->rm.assigned_sors |= BIT(ior->id); + return ior; + } + } + + return NULL; +} + +static int +r535_outp_dfp_get_info(struct nvkm_outp *outp) +{ + NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n", + ctrl->displayId, ctrl->flags, ctrl->flags2); + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r535_outp_detect(struct nvkm_outp *outp) +{ + NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayMask = BIT(outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if 
(ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + if (ctrl->displayMask & BIT(outp->index)) { + ret = r535_outp_dfp_get_info(outp); + if (ret == 0) + ret = 1; + } else { + ret = 0; + } + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static int +r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id) +{ + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = id; + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid) +{ + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + *pid = ctrl->displayIdAssigned; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4]) +{ + NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + ctrl->numLanes = lanes; + for (int i = 0; i < lanes; i++) + ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA, PREEMPHASIS, pe[i]) | + NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]); + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_DP_CTRL_PARAMS *ctrl; + int ret, retries; + u32 cmd, data; + + cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) | + NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) | + NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES); + data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) | + NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) | + NVVAL(NV0073_CTRL, DP_DATA, TARGET, target); + + if (mst) + cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM); + + if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) + cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE); + + if (target == 0 && + (outp->dp.dpcd[DPCD_RC02] & 0x20) && + !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)) + cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES); + + /* We should retry up to 3 times, but only if GSP asks politely */ + for (retries = 0; retries < 3; ++retries) { + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->retryTimeMs = 0; + ctrl->cmd = cmd; + ctrl->data = data; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) { + /* + * Device (likely an eDP panel) isn't ready yet, wait for the time specified + * by 
GSP before retrying again + */ + nvkm_debug(&disp->engine.subdev, + "Waiting %dms for GSP LT panel delay before retrying\n", + ctrl->retryTimeMs); + msleep(ctrl->retryTimeMs); + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } else { + /* GSP didn't say to retry, or we were successful */ + if (ctrl->err) + ret = -EIO; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + break; + } + } + + return ret; +} + +static int +r535_dp_train(struct nvkm_outp *outp, bool retrain) +{ + for (int target = outp->dp.lttprs; target >= 0; target--) { + int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst, + outp->dp.lt.nr, + outp->dp.lt.bw); + if (ret) + return ret; + } + + return 0; +} + +static int +r535_dp_rates(struct nvkm_outp *outp) +{ + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + if (outp->conn->info.type != DCB_CONNECTOR_eDP || + !outp->dp.rates || outp->dp.rate[0].dpcd < 0) + return 0; + + if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) + return -EINVAL; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + for (int i = 0; i < outp->dp.rates; i++) + ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200; + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) +{ + struct nvkm_disp *disp = outp->disp; + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl; + u8 size = *psize; + int ret; + int retries; + + for (retries = 0; retries < 3; ++retries) { + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + ctrl->bAddrOnly = !size; + ctrl->cmd = type; + if (ctrl->bAddrOnly) { + ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE); + ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE); + } + ctrl->addr = addr; + ctrl->size = !ctrl->bAddrOnly ? 
(size - 1) : 0; + memcpy(ctrl->data, data, size); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) { + /* + * Device (likely an eDP panel) isn't ready yet, wait for the time specified + * by GSP before retrying again + */ + nvkm_debug(&disp->engine.subdev, + "Waiting %dms for GSP LT panel delay before retrying in AUX\n", + ctrl->retryTimeMs); + msleep(ctrl->retryTimeMs); + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } else { + memcpy(data, ctrl->data, size); + *psize = ctrl->size; + ret = ctrl->replyType; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + break; + } + } + return ret; +} + +static int +r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu) +{ + return 0; +} + +static void +r535_dp_release(struct nvkm_outp *outp) +{ + if (!outp->dp.lt.bw) { + if (!WARN_ON(!outp->dp.rates)) + outp->dp.lt.bw = outp->dp.rate[0].rate / 27000; + else + outp->dp.lt.bw = 0x06; + } + + outp->dp.lt.nr = 0; + + r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw); + r535_outp_release(outp); +} + +static int +r535_dp_acquire(struct nvkm_outp *outp, bool hda) +{ + int ret; + + ret = r535_outp_acquire(outp, hda); + if (ret) + return ret; + + return 0; +} + +static const struct nvkm_outp_func +r535_dp = { + .detect = r535_outp_detect, + .inherit = r535_outp_inherit, + .acquire = r535_dp_acquire, + .release = r535_dp_release, + .dp.aux_pwr = r535_dp_aux_pwr, + .dp.aux_xfer = r535_dp_aux_xfer, + .dp.mst_id_get = r535_dp_mst_id_get, + .dp.mst_id_put = r535_dp_mst_id_put, + .dp.rates = r535_dp_rates, + .dp.train = r535_dp_train, + .dp.drive = r535_dp_drive, +}; + +static int +r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize) +{ + NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + int ret = -E2BIG; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(outp->index); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + ret = -E2BIG; + if (ctrl->bufferSize <= *psize) { + memcpy(data, ctrl->edidBuffer, ctrl->bufferSize); + *psize = ctrl->bufferSize; + ret = 0; + } + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static const struct nvkm_outp_func +r535_tmds = { + .detect = r535_outp_detect, + .inherit = r535_outp_inherit, + .acquire = r535_outp_acquire, + .release = r535_outp_release, + .edid_get = r535_tmds_edid_get, +}; + +static int +r535_outp_new(struct nvkm_disp *disp, u32 id) +{ + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; + enum nvkm_ior_proto proto; + struct dcb_output dcbE = {}; + struct nvkm_conn *conn; + struct nvkm_outp *outp; + u8 locn, link = 0; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayId = BIT(id); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (ctrl->type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE: + return 0; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + switch (ctrl->protocol) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + proto = TMDS; + link = 1; + break; + case 
NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + proto = TMDS; + link = 2; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + proto = TMDS; + link = 3; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + proto = DP; + link = 1; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + proto = DP; + link = 2; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + break; + default: + WARN_ON(1); + return -EINVAL; + } + + locn = ctrl->location; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + conn = r535_conn_new(disp, id); + if (IS_ERR(conn)) + return PTR_ERR(conn); + + switch (proto) { + case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break; + case DP: dcbE.type = DCB_OUTPUT_DP; break; + default: + WARN_ON(1); + return -EINVAL; + } + + dcbE.location = locn; + dcbE.connector = conn->index; + dcbE.heads = disp->head.mask; + dcbE.i2c_index = 0xff; + dcbE.link = dcbE.sorconf.link = link; + + if (proto == TMDS) { + ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp); + if (ret) + return ret; + } else { + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; + bool mst, wm; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->sorIndex = ~0; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: + dcbE.dpconf.link_bw = 0x06; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: + dcbE.dpconf.link_bw = 0x0a; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: + dcbE.dpconf.link_bw = 0x14; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: + dcbE.dpconf.link_bw = 0x1e; + break; + default: + dcbE.dpconf.link_bw = 0x00; + break; + } + + mst = ctrl->bIsMultistreamSupported; + wm = ctrl->bHasIncreasedWatermarkLimits; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + if (WARN_ON(!dcbE.dpconf.link_bw)) + return -EINVAL; + + dcbE.dpconf.link_nr = 4; + + ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp); + if (ret) + return ret; + + outp->dp.mst = mst; + outp->dp.increased_wm = wm; + } + + + outp->conn = conn; + list_add_tail(&outp->head, &disp->outps); + return 0; +} + +static void +r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc) +{ + struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq); + Nv2080DpIrqNotification *irq = repv; + + if (WARN_ON(repc < sizeof(*irq))) + return; + + nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId); + + if (irq->displayId) + nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ); +} + +static void +r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc) +{ + struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd); + Nv2080HotplugNotification *hpd = repv; + + if (WARN_ON(repc < sizeof(*hpd))) + return; + + nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n", + hpd->plugDisplayMask, hpd->unplugDisplayMask); + + for (int i = 0; i < 31; i++) { + u32 mask = 0; + + if (hpd->plugDisplayMask & BIT(i)) + mask |= NVKM_DPYID_PLUG; + if (hpd->unplugDisplayMask & BIT(i)) + mask |= NVKM_DPYID_UNPLUG; + + if (mask) + nvkm_event_ntfy(&disp->rm.event, i, mask); + } +} + +static const struct nvkm_event_func +r535_disp_event = { +}; + +static void 
+r535_disp_intr_head_timing(struct nvkm_disp *disp, int head) +{ + struct nvkm_subdev *subdev = &disp->engine.subdev; + struct nvkm_device *device = subdev->device; + u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04)); + + if (stat & 0x00000002) { + nvkm_disp_vblank(disp, head); + + nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002); + } +} + +static irqreturn_t +r535_disp_intr(struct nvkm_inth *inth) +{ + struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth); + struct nvkm_subdev *subdev = &disp->engine.subdev; + struct nvkm_device *device = subdev->device; + unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff; + int head; + + for_each_set_bit(head, &mask, 8) + r535_disp_intr_head_timing(disp, head); + + return IRQ_HANDLED; +} + +static void +r535_disp_fini(struct nvkm_disp *disp, bool suspend) +{ + if (!disp->engine.subdev.use.enabled) + return; + + nvkm_gsp_rm_free(&disp->rm.object); + + if (!suspend) { + nvkm_gsp_event_dtor(&disp->rm.irq); + nvkm_gsp_event_dtor(&disp->rm.hpd); + nvkm_event_fini(&disp->rm.event); + + nvkm_gsp_rm_free(&disp->rm.objcom); + nvkm_gsp_device_dtor(&disp->rm.device); + nvkm_gsp_client_dtor(&disp->rm.client); + } +} + +static int +r535_disp_init(struct nvkm_disp *disp) +{ + int ret; + + ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16, + disp->func->root.oclass, 0, &disp->rm.object); + if (ret) + return ret; + + return 0; +} + +static int +r535_disp_oneinit(struct nvkm_disp *disp) +{ + struct nvkm_device *device = disp->engine.subdev.device; + struct nvkm_gsp *gsp = device->gsp; + NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl; + int ret, i; + + /* RAMIN. */ + ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst); + if (ret) + return ret; + + if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM)) + return -EINVAL; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory); + ctrl->instMemSize = nvkm_memory_size(disp->inst->memory); + ctrl->instMemAddrSpace = ADDR_FBMEM; + ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + /* OBJs. 
*/ + ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device); + if (ret) + return ret; + + ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0, + &disp->rm.objcom); + if (ret) + return ret; + + { + NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->wndw.mask = ctrl->windowPresentMask; + disp->wndw.nr = fls(disp->wndw.mask); + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + } + + /* */ + { +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) + NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl; + struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice; + + ctrl = nvkm_gsp_rm_ctrl_get(subdevice, + NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */ + + { + const guid_t NBCI_DSM_GUID = + GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7, + 0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44); + u64 NBCI_DSM_REV = 0x00000102; + const guid_t NVHG_DSM_GUID = + GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48, + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4); + u64 NVHG_DSM_REV = 0x00000102; + acpi_handle handle = ACPI_HANDLE(device->dev); + + if (handle && acpi_has_method(handle, "_DSM")) { + bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV, + 1ULL << 0x00000014); + bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV, + 1ULL << 0x00000014); + + if (nbci || nvhg) { + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(ctrl->backLightData), + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + obj = acpi_evaluate_dsm(handle, nbci ? 
&NBCI_DSM_GUID : &NVHG_DSM_GUID, + 0x00000102, 0x14, &argv4); + if (!obj) { + acpi_handle_info(handle, "failed to evaluate _DSM\n"); + } else { + for (int i = 0; i < obj->package.count; i++) { + union acpi_object *elt = &obj->package.elements[i]; + u32 size; + + if (elt->integer.value & ~0xffffffffULL) + size = 8; + else + size = 4; + + memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size); + ctrl->backLightDataSize += size; + } + + ctrl->status = 0; + ACPI_FREE(obj); + } + + kfree(argv4.buffer.pointer); + } + } + } + + ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl); + if (ret) + return ret; +#endif + } + + /* */ + { + NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); + if (ret) + return ret; + } + + /* */ + { + NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->head.nr = ctrl->numHeads; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + } + + /* */ + { + NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->head.mask = ctrl->headMask; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + for_each_set_bit(i, &disp->head.mask, disp->head.nr) { + ret = nvkm_head_new_(&r535_head, disp, i); + if (ret) + return ret; + } + } + + disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask); + nvkm_debug(&disp->engine.subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask); + for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) { + ret = disp->func->sor.new(disp, i); + if (ret) + return ret; + } + + /* */ + { + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; + unsigned long mask; + int i; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + mask = ctrl->displayMask; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + + for_each_set_bit(i, &mask, 32) { + ret = r535_outp_new(disp, i); + if (ret) + return ret; + } + } + + ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event); + if (WARN_ON(ret)) + return ret; + + ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG, + r535_disp_hpd, &disp->rm.hpd); + if (ret) + return ret; + + ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ, + r535_disp_irq, &disp->rm.irq); + if (ret) + return ret; + + /* RAMHT. */ + ret = nvkm_ramht_new(device, disp->func->ramht_size ? 
disp->func->ramht_size : + 0x1000, 0, disp->inst, &disp->ramht); + if (ret) + return ret; + + ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst); + if (ret < 0) + return ret; + + ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev, + r535_disp_intr, &disp->engine.subdev.inth); + if (ret) + return ret; + + nvkm_inth_allow(&disp->engine.subdev.inth); + return 0; +} + +static void +r535_disp_dtor(struct nvkm_disp *disp) +{ + kfree(disp->func); +} + +int +r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) +{ + struct nvkm_disp_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_disp_dtor; + rm->oneinit = r535_disp_oneinit; + rm->init = r535_disp_init; + rm->fini = r535_disp_fini; + rm->uevent = hw->uevent; + rm->sor.cnt = r535_sor_cnt; + rm->sor.new = r535_sor_new; + rm->ramht_size = hw->ramht_size; + + rm->root = hw->root; + + for (int i = 0; hw->user[i].ctor; i++) { + switch (hw->user[i].base.oclass & 0xff) { + case 0x73: rm->user[i] = hw->user[i]; break; + case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break; + case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break; + case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break; + case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break; + default: + WARN_ON(1); + continue; + } + } + + ret = nvkm_disp_new_(rm, device, type, inst, pdisp); + if (ret) + kfree(rm); + + mutex_init(&(*pdisp)->super.mutex); //XXX + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c new file mode 100644 index 000000000000..6305f3a93810 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -0,0 +1,333 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +struct fbsr_item { + const char *type; + u64 addr; + u64 size; + + struct list_head head; +}; + +struct fbsr { + struct list_head items; + + u64 size; + int regions; + + struct nvkm_gsp_client client; + struct nvkm_gsp_device device; + + u64 hmemory; + u64 sys_offset; +}; + +static int +fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, + u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) +{ + struct nvkm_gsp_client *client = device->object.client; + struct nvkm_gsp *gsp = client->gsp; + const u32 pages = size / GSP_PAGE_SIZE; + rpc_alloc_memory_v13_01 *rpc; + int ret; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, + sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0])); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + rpc->hClient = client->object.handle; + rpc->hDevice = device->object.handle; + rpc->hMemory = handle; + if (aper == NVKM_MEM_TARGET_HOST) { + rpc->hClass = NV01_MEMORY_LIST_SYSTEM; + rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) | + NVDEF(NVOS02, FLAGS, LOCATION, PCI) | + NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); + } else { + rpc->hClass = NV01_MEMORY_LIST_FBMEM; + rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) | + NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) | + NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); + rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ + } + rpc->pteAdjust = 0; + rpc->length = size; + rpc->pageCount = pages; + rpc->pteDesc.idr = 0; + rpc->pteDesc.reserved1 = 0; + rpc->pteDesc.length = pages; + + if (sgt) { + struct scatterlist *sgl; + int pte = 0, idx; + + for_each_sgtable_dma_sg(sgt, sgl, idx) { + for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++) + rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i; + + } + } else { + for (int i = 0; i < pages; i++) + rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; + } + + ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL); + if (ret) + return ret; + + object->client = device->object.client; + object->parent = &device->object; + object->handle = handle; + return 0; +} + +static int +fbsr_send(struct fbsr *fbsr, struct fbsr_item *item) +{ + NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fbsr->client.gsp; + struct nvkm_gsp_object memlist; + int ret; + + ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, + item->addr, item->size, NULL, &memlist); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) { + ret = PTR_ERR(ctrl); + goto done; + } + + ctrl->fbsrType = FBSR_TYPE_DMA; + ctrl->hClient = fbsr->client.object.handle; + ctrl->hVidMem = fbsr->hmemory++; + ctrl->vidOffset = 0; + ctrl->sysOffset = fbsr->sys_offset; + ctrl->size = item->size; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +done: + nvkm_gsp_rm_free(&memlist); + if (ret) + return ret; + + fbsr->sys_offset += item->size; + return 0; +} + +static int +fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size) +{ + NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; + struct nvkm_gsp *gsp = fbsr->client.gsp; + struct nvkm_gsp_object memlist; + int ret; + + ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, + 0, fbsr->size, sgt, &memlist); + if (ret) + return ret; + + ctrl = 
nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->fbsrType = FBSR_TYPE_DMA; + ctrl->numRegions = fbsr->regions; + ctrl->hClient = fbsr->client.object.handle; + ctrl->hSysMem = fbsr->hmemory++; + ctrl->gspFbAllocsSysOffset = items_size; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + nvkm_gsp_rm_free(&memlist); + return 0; +} + +static bool +fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size) +{ + struct fbsr_item *item; + + if (!(item = kzalloc(sizeof(*item), GFP_KERNEL))) + return false; + + item->type = type; + item->addr = addr; + item->size = size; + list_add_tail(&item->head, &fbsr->items); + return true; +} + +static bool +fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) +{ + return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory)); +} + +static void +r535_instmem_resume(struct nvkm_instmem *imem) +{ + /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */ + if (imem->rm.fbsr_valid) { + nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr); + imem->rm.fbsr_valid = false; + } +} + +static int +r535_instmem_suspend(struct nvkm_instmem *imem) +{ + struct nvkm_subdev *subdev = &imem->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_instobj *iobj; + struct fbsr fbsr = {}; + struct fbsr_item *item, *temp; + u64 items_size; + int ret; + + INIT_LIST_HEAD(&fbsr.items); + fbsr.hmemory = 0xcaf00003; + + /* Create a list of all regions we need RM to save during suspend. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + if (!fbsr_inst(&fbsr, "inst", &iobj->memory)) + return -ENOMEM; + } + } + + list_for_each_entry(iobj, &imem->boot, head) { + if (!fbsr_inst(&fbsr, "boot", &iobj->memory)) + return -ENOMEM; + } + + if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size)) + return -ENOMEM; + + /* Determine memory requirements. */ + list_for_each_entry(item, &fbsr.items, head) { + nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n", + item->addr, item->size, item->type); + fbsr.size += item->size; + fbsr.regions++; + } + + items_size = fbsr.size; + nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size); + + fbsr.size += gsp->fb.rsvd_size; + fbsr.size += gsp->fb.bios.vga_workspace.size; + nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size); + + ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr); + if (ret) + goto done; + + /* Tell RM about the sysmem which will hold VRAM contents across suspend. */ + ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device); + if (ret) + goto done_sgt; + + ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size); + if (WARN_ON(ret)) + goto done_sgt; + + /* Send VRAM regions that need saving. */ + list_for_each_entry(item, &fbsr.items, head) { + ret = fbsr_send(&fbsr, item); + if (WARN_ON(ret)) + goto done_sgt; + } + + imem->rm.fbsr_valid = true; + + /* Cleanup everything except the sysmem backup, which will be removed after resume. */ +done_sgt: + if (ret) /* ... unless we failed already. 
*/ + nvkm_gsp_sg_free(device, &imem->rm.fbsr); +done: + list_for_each_entry_safe(item, temp, &fbsr.items, head) { + list_del(&item->head); + kfree(item); + } + + nvkm_gsp_device_dtor(&fbsr.device); + nvkm_gsp_client_dtor(&fbsr.client); + return ret; +} + +static void * +r535_instmem_dtor(struct nvkm_instmem *imem) +{ + kfree(imem->func); + return imem; +} + +int +r535_instmem_new(const struct nvkm_instmem_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pinstmem) +{ + struct nvkm_instmem_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_instmem_dtor; + rm->fini = hw->fini; + rm->suspend = r535_instmem_suspend; + rm->resume = r535_instmem_resume; + rm->memory_new = hw->memory_new; + rm->memory_wrap = hw->memory_wrap; + rm->zero = false; + + ret = nv50_instmem_new_(rm, device, type, inst, pinstmem); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c new file mode 100644 index 000000000000..621e5dfe898a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -0,0 +1,550 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static u32 +r535_chan_doorbell_handle(struct nvkm_chan *chan) +{ + return (chan->cgrp->runl->id << 16) | chan->id; +} + +static void +r535_chan_stop(struct nvkm_chan *chan) +{ +} + +static void +r535_chan_start(struct nvkm_chan *chan) +{ +} + +static void +r535_chan_ramfc_clear(struct nvkm_chan *chan) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + + nvkm_gsp_rm_free(&chan->rm.object); + + dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size, + chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr); + + nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx); +} + +#define CHID_PER_USERD 8 + +static int +r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + struct nvkm_engn *engn; + struct nvkm_device *device = fifo->engine.subdev.device; + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; + const int userd_p = chan->id / CHID_PER_USERD; + const int userd_i = chan->id % CHID_PER_USERD; + u32 eT = ~0; + int ret; + + if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { + ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); + if (ret) + return ret; + } + + nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { + eT = engn->id; + break; + } + + if (WARN_ON(eT == ~0)) + return -EINVAL; + + chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, + fifo->rm.mthdbuf_size, + &chan->rm.mthdbuf.addr, GFP_KERNEL); + if (!chan->rm.mthdbuf.ptr) + return -ENOMEM; + + args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id, + fifo->func->chan.user.oclass, sizeof(*args), + &chan->rm.object); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->gpFifoOffset = offset; + args->gpFifoEntries = length / 8; + + args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); + args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq); + if (!priv) + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); + else + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE); + args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE); + + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE); + + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT); + args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); + + args->hVASpace = 
chan->vmm->rm.object.handle; + args->engineType = eT; + + args->instanceMem.base = chan->inst->addr; + args->instanceMem.size = chan->inst->size; + args->instanceMem.addressSpace = 2; + args->instanceMem.cacheAttrib = 1; + + args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base; + args->userdMem.size = fifo->func->chan.func->userd->size; + args->userdMem.addressSpace = 2; + args->userdMem.cacheAttrib = 1; + + args->ramfcMem.base = chan->inst->addr + 0; + args->ramfcMem.size = 0x200; + args->ramfcMem.addressSpace = 2; + args->ramfcMem.cacheAttrib = 1; + + args->mthdbufMem.base = chan->rm.mthdbuf.addr; + args->mthdbufMem.size = fifo->rm.mthdbuf_size; + args->mthdbufMem.addressSpace = 1; + args->mthdbufMem.cacheAttrib = 0; + + if (!priv) + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER); + else + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); + + ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); + if (ret) + return ret; + + if (1) { + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl; + + if (1) { + NVA06F_CTRL_BIND_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object, + NVA06F_CTRL_CMD_BIND, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->engineType = eT; + + ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl); + if (ret) + return ret; + } + + ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object, + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->bEnable = 1; + ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl); + } + + return ret; +} + +static const struct nvkm_chan_func_ramfc +r535_chan_ramfc = { + .write = r535_chan_ramfc_write, + .clear = r535_chan_ramfc_clear, + .devm = 0xfff, + .priv = true, +}; + +static const struct nvkm_chan_func +r535_chan = { + .inst = &gf100_chan_inst, + .userd = &gv100_chan_userd, + .ramfc = &r535_chan_ramfc, + .start = r535_chan_start, + .stop = r535_chan_stop, + .doorbell_handle = r535_chan_doorbell_handle, +}; + +static const struct nvkm_cgrp_func +r535_cgrp = { +}; + +static int +r535_engn_nonstall(struct nvkm_engn *engn) +{ + struct nvkm_subdev *subdev = &engn->engine->subdev; + int ret; + + ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst); + WARN_ON(ret == -ENOENT); + return ret; +} + +static const struct nvkm_engn_func +r535_ce = { + .nonstall = r535_engn_nonstall, +}; + +static int +r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) +{ + /* RM requires GR context buffers to remain mapped until after the + * channel has been destroyed (as opposed to after the last gr obj + * has been deleted). + * + * Take an extra ref here, which will be released once the channel + * object has been deleted. 
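+	 * The reference is dropped again when the channel's RAMFC is torn down,
+	 * via the nvkm_cgrp_vctx_put() call in r535_chan_ramfc_clear().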
+ */ + refcount_inc(&vctx->refs); + chan->rm.grctx = vctx; + return 0; +} + +static const struct nvkm_engn_func +r535_gr = { + .nonstall = r535_engn_nonstall, + .ctor2 = r535_gr_ctor, +}; + +static int +r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) +{ + struct nvkm_gsp_client *client = &chan->vmm->rm.client; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice, + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->hClient = client->object.handle; + ctrl->hObject = chan->rm.object.handle; + ctrl->hChanClient = client->object.handle; + ctrl->virtAddress = vctx->vma->addr; + ctrl->size = vctx->inst->size; + ctrl->engineType = engn->id; + ctrl->ChID = chan->id; + + return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl); +} + +static int +r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan) +{ + int ret; + + if (WARN_ON(!engn->rm.size)) + return -EINVAL; + + ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL, + &vctx->inst); + if (ret) + return ret; + + ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma); + if (ret) + return ret; + + ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0); + if (ret) + return ret; + + return r535_flcn_bind(engn, vctx, chan); +} + +static const struct nvkm_engn_func +r535_flcn = { + .nonstall = r535_engn_nonstall, + .ctor2 = r535_flcn_ctor, +}; + +static void +r535_runl_allow(struct nvkm_runl *runl, u32 engm) +{ +} + +static void +r535_runl_block(struct nvkm_runl *runl, u32 engm) +{ +} + +static const struct nvkm_runl_func +r535_runl = { + .block = r535_runl_block, + .allow = r535_runl_allow, +}; + +static int +r535_fifo_2080_type(enum nvkm_subdev_type type, int inst) +{ + switch (type) { + case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0; + case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst; + case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2; + case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst; + case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst; + case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst; + case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA; + case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW; + default: + break; + } + + WARN_ON(1); + return -EINVAL; +} + +static int +r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype) +{ + switch (rm) { + case RM_ENGINE_TYPE_GR0: + *ptype = NVKM_ENGINE_GR; + return 0; + case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9: + *ptype = NVKM_ENGINE_CE; + return rm - RM_ENGINE_TYPE_COPY0; + case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7: + *ptype = NVKM_ENGINE_NVDEC; + return rm - RM_ENGINE_TYPE_NVDEC0; + case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2: + *ptype = NVKM_ENGINE_NVENC; + return rm - RM_ENGINE_TYPE_NVENC0; + case RM_ENGINE_TYPE_SW: + *ptype = NVKM_ENGINE_SW; + return 0; + case RM_ENGINE_TYPE_SEC2: + *ptype = NVKM_ENGINE_SEC2; + return 0; + case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7: + *ptype = NVKM_ENGINE_NVJPG; + return rm - RM_ENGINE_TYPE_NVJPEG0; + case RM_ENGINE_TYPE_OFA: + *ptype = NVKM_ENGINE_OFA; + return 0; + default: + return -EINVAL; + } +} + +static int +r535_fifo_ectx_size(struct nvkm_fifo *fifo) +{ + NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp; + struct nvkm_runl *runl; + struct nvkm_engn 
*engn; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO, + sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + for (int i = 0; i < ctrl->numConstructedFalcons; i++) { + nvkm_runl_foreach(runl, fifo) { + nvkm_runl_foreach_engn(engn, runl) { + if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) { + engn->rm.size = + ctrl->constructedFalconsTable[i].ctxBufferSize; + break; + } + } + } + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int +r535_fifo_runl_ctor(struct nvkm_fifo *fifo) +{ + struct nvkm_subdev *subdev = &fifo->engine.subdev; + struct nvkm_gsp *gsp = subdev->device->gsp; + struct nvkm_runl *runl; + struct nvkm_engn *engn; + u32 cgids = 2048; + u32 chids = 2048; + int ret; + NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; + + if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) || + (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid))) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + for (int i = 0; i < ctrl->numEntries; i++) { + const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE]; + const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST]; + + runl = nvkm_runl_get(fifo, id, addr); + if (!runl) { + runl = nvkm_runl_new(fifo, id, addr, 0); + if (WARN_ON(IS_ERR(runl))) + continue; + } + } + + for (int i = 0; i < ctrl->numEntries; i++) { + const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE]; + const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE]; + const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST]; + enum nvkm_subdev_type type; + int inst, nv2080; + + runl = nvkm_runl_get(fifo, id, addr); + if (!runl) + continue; + + inst = r535_fifo_engn_type(rmid, &type); + if (inst < 0) { + nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid); + nvkm_runl_del(runl); + continue; + } + + nv2080 = r535_fifo_2080_type(type, inst); + if (nv2080 < 0) { + nvkm_runl_del(runl); + continue; + } + + switch (type) { + case NVKM_ENGINE_CE: + engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst); + break; + case NVKM_ENGINE_GR: + engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst); + break; + case NVKM_ENGINE_NVDEC: + case NVKM_ENGINE_NVENC: + case NVKM_ENGINE_NVJPG: + case NVKM_ENGINE_OFA: + engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst); + break; + case NVKM_ENGINE_SW: + continue; + default: + engn = NULL; + break; + } + + if (!engn) { + nvkm_runl_del(runl); + continue; + } + + engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC]; + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + + { + NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + fifo->rm.mthdbuf_size = ctrl->size; + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + } + + return r535_fifo_ectx_size(fifo); +} + +static void +r535_fifo_dtor(struct nvkm_fifo *fifo) +{ + kfree(fifo->func); +} + +int +r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) +{ + 
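+	/* Build an RM-specific nvkm_fifo_func: keep the HW constructor's cgrp/chan
+	 * class data, but point the hooks at the r535_* implementations above
+	 * (plus the ga100 non-stall interrupt handling).
+	 */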
struct nvkm_fifo_func *rm; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_fifo_dtor; + rm->runl_ctor = r535_fifo_runl_ctor; + rm->runl = &r535_runl; + rm->cgrp = hw->cgrp; + rm->cgrp.func = &r535_cgrp; + rm->chan = hw->chan; + rm->chan.func = &r535_chan; + rm->nonstall = &ga100_fifo_nonstall; + rm->nonstall_ctor = ga100_fifo_nonstall_ctor; + + return nvkm_fifo_new_(rm, device, type, inst, pfifo); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c new file mode 100644 index 000000000000..37bde547ae65 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -0,0 +1,508 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#define r535_gr(p) container_of((p), struct r535_gr, base) + +#define R515_GR_MAX_CTXBUFS 9 + +struct r535_gr { + struct nvkm_gr base; + + struct { + u16 bufferId; + u32 size; + u8 page; + u8 align; + bool global; + bool init; + bool ro; + } ctxbuf[R515_GR_MAX_CTXBUFS]; + int ctxbuf_nr; + + struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; +}; + +struct r535_gr_chan { + struct nvkm_object object; + struct r535_gr *gr; + + struct nvkm_vmm *vmm; + struct nvkm_chan *chan; + + struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; +}; + +struct r535_gr_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_gr_obj_dtor(struct nvkm_object *object) +{ + struct r535_gr_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_gr_obj = { + .dtor = r535_gr_obj_dtor, +}; + +static int +r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object); + struct r535_gr_obj *obj; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object); + *pobject = &obj->object; + + return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0, + &obj->rm); +} + +static void * +r535_gr_chan_dtor(struct nvkm_object *object) +{ + struct r535_gr_chan *grc = container_of(object, typeof(*grc), object); + struct r535_gr *gr = grc->gr; + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + nvkm_vmm_put(grc->vmm, &grc->vma[i]); + nvkm_memory_unref(&grc->mem[i]); + } + + nvkm_vmm_unref(&grc->vmm); + return grc; +} + +static const struct nvkm_object_func +r535_gr_chan = { + .dtor = r535_gr_chan_dtor, +}; + +static int +r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm, + struct nvkm_memory **pmem, struct nvkm_vma **pvma, + struct nvkm_gsp_object *chan) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice, + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + ctrl->engineType = 1; + ctrl->hChanClient = vmm->rm.client.object.handle; + ctrl->hObject = chan->handle; + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry = + &ctrl->promoteEntry[ctrl->entryCount]; + const bool alloc = golden || !gr->ctxbuf[i].global; + int ret; + + entry->bufferId = gr->ctxbuf[i].bufferId; + entry->bInitialize = gr->ctxbuf[i].init && alloc; + + if (alloc) { + ret = nvkm_memory_new(device, gr->ctxbuf[i].init ? 
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST, + gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page, + gr->ctxbuf[i].init, &pmem[i]); + if (WARN_ON(ret)) + return ret; + + if (gr->ctxbuf[i].bufferId == + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) + entry->bNonmapped = 1; + } else { + if (gr->ctxbuf[i].bufferId == + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP) + continue; + + pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]); + } + + if (!entry->bNonmapped) { + struct gf100_vmm_map_v0 args = { + .priv = 1, + .ro = gr->ctxbuf[i].ro, + }; + + mutex_lock(&vmm->mutex.vmm); + ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align, + nvkm_memory_size(pmem[i]), &pvma[i]); + mutex_unlock(&vmm->mutex.vmm); + if (ret) + return ret; + + ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args)); + if (ret) + return ret; + + entry->gpuVirtAddr = pvma[i]->addr; + } + + if (entry->bInitialize) { + entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]); + entry->size = gr->ctxbuf[i].size; + entry->physAttr = 4; + } + + nvkm_debug(subdev, + "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n", + entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size, + entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped); + + ctrl->entryCount++; + } + + return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl); +} + +static int +r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass, + struct nvkm_object **pobject) +{ + struct r535_gr *gr = r535_gr(base); + struct r535_gr_chan *grc; + int ret; + + if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object); + grc->gr = gr; + grc->vmm = nvkm_vmm_ref(chan->vmm); + grc->chan = chan; + *pobject = &grc->object; + + ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object); + if (ret) + return ret; + + return 0; +} + +static u64 +r535_gr_units(struct nvkm_gr *gr) +{ + struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp; + + return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; +} + +static int +r535_gr_oneinit(struct nvkm_gr *base) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; + struct r535_gr *gr = container_of(base, typeof(*gr), base); + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_mmu *mmu = device->mmu; + struct { + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + struct nvkm_gsp_object chan; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; + } golden = {}; + int ret; + + /* Allocate a channel to use for golden context init. 
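+	 * Golden context init is triggered in RM against this (privileged)
+	 * channel and its VA space; everything allocated here is torn down
+	 * again at the end of this function.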
*/ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst); + if (ret) + goto done; + + ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm); + if (ret) + goto done; + + ret = mmu->func->promote_vmm(golden.vmm); + if (ret) + goto done; + + { + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000, + device->fifo->func->chan.user.oclass, + sizeof(*args), &golden.chan); + if (IS_ERR(args)) { + ret = PTR_ERR(args); + goto done; + } + + args->gpFifoOffset = 0; + args->gpFifoEntries = 0x1000 / 8; + args->flags = + NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) | + NVDEF(NVOS04, FLAGS, VPR, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) | + NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) | + NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) | + NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) | + NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) | + NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) | + NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) | + NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) | + NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) | + NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) | + NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) | + NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) | + NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) | + NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); + args->hVASpace = golden.vmm->rm.object.handle; + args->engineType = 1; + args->instanceMem.base = nvkm_memory_addr(golden.inst); + args->instanceMem.size = 0x1000; + args->instanceMem.addressSpace = 2; + args->instanceMem.cacheAttrib = 1; + args->ramfcMem.base = nvkm_memory_addr(golden.inst); + args->ramfcMem.size = 0x200; + args->ramfcMem.addressSpace = 2; + args->ramfcMem.cacheAttrib = 1; + args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000; + args->userdMem.size = 0x200; + args->userdMem.addressSpace = 2; + args->userdMem.cacheAttrib = 1; + args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000; + args->mthdbufMem.size = 0x5000; + args->mthdbufMem.addressSpace = 2; + args->mthdbufMem.cacheAttrib = 1; + args->internalFlags = + NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) | + NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) | + NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); + + ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args); + if (ret) + goto done; + } + + /* Fetch context buffer info from RM and allocate each of them here to use + * during golden context init (or later as a global context buffer). + * + * Also build the information that'll be used to create channel contexts. 
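+	 * Each buffer ends up described in gr->ctxbuf[] by a bufferId, size,
+	 * page shift, alignment and global/init/ro flags (see the map[] table
+	 * below).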
+ */ + info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + sizeof(*info)); + if (WARN_ON(IS_ERR(info))) { + ret = PTR_ERR(info); + goto done; + } + + for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) { + static const struct { + u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ + u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */ + bool global; + bool init; + bool ro; + } map[] = { +#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \ + .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \ + .global = (G), .init = (I), .ro = (R) } +#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R)) + /* global init ro */ + _A( GRAPHICS, MAIN, false, true, false), + _B( PATCH, false, true, false), + _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false), + _B( PAGEPOOL, true, false, false), + _B( ATTRIBUTE_CB, true, false, false), + _B( RTV_CB_GLOBAL, true, false, false), + _B( FECS_EVENT, true, true, false), + _B( PRIV_ACCESS_MAP, true, true, true), +#undef _B +#undef _A + }; + u32 size = info->engineContextBuffersInfo[0].engine[i].size; + u8 align, page; + int id; + + for (id = 0; id < ARRAY_SIZE(map); id++) { + if (map[id].id0 == i) + break; + } + + nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i, + size, (id < ARRAY_SIZE(map)) ? "*" : ""); + if (id >= ARRAY_SIZE(map)) + continue; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN) + size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */ + + if (size >= 1 << 21) page = 21; + else if (size >= 1 << 16) page = 16; + else page = 12; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB) + align = order_base_2(size); + else + align = page; + + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + continue; + + gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1; + gr->ctxbuf[gr->ctxbuf_nr].size = size; + gr->ctxbuf[gr->ctxbuf_nr].page = page; + gr->ctxbuf[gr->ctxbuf_nr].align = align; + gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global; + gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init; + gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro; + gr->ctxbuf_nr++; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) { + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + continue; + + gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1]; + gr->ctxbuf[gr->ctxbuf_nr].bufferId = + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; + gr->ctxbuf_nr++; + } + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); + + /* Promote golden context to RM. */ + ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan); + if (ret) + goto done; + + /* Allocate 3D class on channel to trigger golden context init in RM. 
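+	 * We pick the first entry in the HW sclass list whose class number ends
+	 * in 0x97 (the 3D class), allocate it on the channel, and immediately
+	 * free it again.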
*/ + { + int i; + + for (i = 0; gr->base.func->sclass[i].ctor; i++) { + if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) { + struct nvkm_gsp_object threed; + + ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000, + gr->base.func->sclass[i].oclass, 0, + &threed); + if (ret) + goto done; + + nvkm_gsp_rm_free(&threed); + break; + } + } + + if (WARN_ON(!gr->base.func->sclass[i].ctor)) { + ret = -EINVAL; + goto done; + } + } + +done: + nvkm_gsp_rm_free(&golden.chan); + for (int i = gr->ctxbuf_nr - 1; i >= 0; i--) + nvkm_vmm_put(golden.vmm, &golden.vma[i]); + nvkm_vmm_unref(&golden.vmm); + nvkm_memory_unref(&golden.inst); + return ret; + +} + +static void * +r535_gr_dtor(struct nvkm_gr *base) +{ + struct r535_gr *gr = r535_gr(base); + + while (gr->ctxbuf_nr) + nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]); + + kfree(gr->base.func); + return gr; +} + +int +r535_gr_new(const struct gf100_gr_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) +{ + struct nvkm_gr_func *rm; + struct r535_gr *gr; + int nclass; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_gr_dtor; + rm->oneinit = r535_gr_oneinit; + rm->units = r535_gr_units; + rm->chan_new = r535_gr_chan_new; + + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_gr_obj_ctor; + } + + if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) { + kfree(rm); + return -ENOMEM; + } + + *pgr = &gr->base; + + return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c new file mode 100644 index 000000000000..f42879b2ea7e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -0,0 +1,2252 @@ +/* + * Copyright 2023 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include + +#include "priv.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +extern struct dentry *nouveau_debugfs_root; + +const struct nvkm_gsp_rm +r535_gsp_rm = { + .api = &r535_rm, +}; + +static void +r535_gsp_msgq_work(struct work_struct *work) +{ + struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); + + mutex_lock(&gsp->cmdq.mutex); + if (*gsp->msgq.rptr != *gsp->msgq.wptr) + r535_gsp_msg_recv(gsp, 0, 0); + mutex_unlock(&gsp->cmdq.mutex); +} + +static irqreturn_t +r535_gsp_intr(struct nvkm_inth *inth) +{ + struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); + struct nvkm_subdev *subdev = &gsp->subdev; + u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); + u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + + gsp->falcon.func->riscv_irqmask); + u32 stat = intr & inte; + + if (!stat) { + nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); + return IRQ_NONE; + } + + if (stat & 0x00000040) { + nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); + schedule_work(&gsp->msgq.work); + stat &= ~0x00000040; + } + + if (stat) { + nvkm_error(subdev, "intr %08x\n", stat); + nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); + nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); + } + + nvkm_falcon_intr_retrigger(&gsp->falcon); + return IRQ_HANDLED; +} + +static int +r535_gsp_intr_get_table(struct nvkm_gsp *gsp) +{ + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; + int ret = 0; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); + if (WARN_ON(ret)) { + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; + } + + for (unsigned i = 0; i < ctrl->tableLen; i++) { + enum nvkm_subdev_type type; + int inst; + + nvkm_debug(&gsp->subdev, + "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, + ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, + ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); + + switch (ctrl->table[i].engineIdx) { + case MC_ENGINE_IDX_GSP: + type = NVKM_SUBDEV_GSP; + inst = 0; + break; + case MC_ENGINE_IDX_DISP: + type = NVKM_ENGINE_DISP; + inst = 0; + break; + case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: + type = NVKM_ENGINE_CE; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; + break; + case MC_ENGINE_IDX_GR0: + type = NVKM_ENGINE_GR; + inst = 0; + break; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + type = NVKM_ENGINE_NVDEC; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; + break; + case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: + type = NVKM_ENGINE_NVENC; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; + break; + case MC_ENGINE_IDX_NVJPEG0 ... 
MC_ENGINE_IDX_NVJPEG7: + type = NVKM_ENGINE_NVJPG; + inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; + break; + case MC_ENGINE_IDX_OFA0: + type = NVKM_ENGINE_OFA; + inst = 0; + break; + default: + continue; + } + + if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { + ret = -ENOSPC; + break; + } + + gsp->intr[gsp->intr_nr].type = type; + gsp->intr[gsp->intr_nr].inst = inst; + gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; + gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; + gsp->intr_nr++; + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return ret; +} + +static int +r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + int last_usable = -1; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { + NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = + &rpc->fbRegionInfoParams.fbRegion[i]; + + nvkm_debug(&gsp->subdev, "fb region %d: " + "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, + reg->base, reg->limit, reg->reserved, reg->performance, + reg->supportCompressed, reg->supportISO, reg->bProtected); + + if (!reg->reserved && !reg->bProtected) { + if (reg->supportCompressed && reg->supportISO && + !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { + const u64 size = (reg->limit + 1) - reg->base; + + gsp->fb.region[gsp->fb.region_nr].addr = reg->base; + gsp->fb.region[gsp->fb.region_nr].size = size; + gsp->fb.region_nr++; + } + + last_usable = i; + } + } + + if (last_usable >= 0) { + u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; + + gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; + } + + for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { + if (rpc->gpcInfo.gpcMask & BIT(gpc)) { + gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); + gsp->gr.gpcs++; + } + } + + nvkm_gsp_rpc_done(gsp, rpc); + return 0; +} + +static void +nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem) +{ + if (mem->data) { + /* + * Poison the buffer to catch any unexpected access from + * GSP-RM if the buffer was prematurely freed. + */ + memset(mem->data, 0xFF, mem->size); + + dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr); + put_device(mem->dev); + + memset(mem, 0, sizeof(*mem)); + } +} + +/** + * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects + * @gsp: gsp pointer + * @size: number of bytes to allocate + * @mem: nvkm_gsp_mem object to initialize + * + * Allocates a block of memory for use with GSP. + * + * This memory block can potentially out-live the driver's remove() callback, + * so we take a device reference to ensure its lifetime. The reference is + * dropped in the destructor. 
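+ *
+ * Callers pair this with nvkm_gsp_mem_dtor(), which poisons the buffer,
+ * frees the coherent DMA allocation and drops the device reference.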
+ */ +static int +nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) +{ + mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); + if (WARN_ON(!mem->data)) + return -ENOMEM; + + mem->size = size; + mem->dev = get_device(gsp->subdev.device->dev); + + return 0; +} + +static int +r535_gsp_postinit(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + int ret; + + ret = r535_gsp_rpc_get_gsp_static_info(gsp); + if (WARN_ON(ret)) + return ret; + + INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); + + ret = r535_gsp_intr_get_table(gsp); + if (WARN_ON(ret)) + return ret; + + ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); + if (WARN_ON(ret < 0)) + return ret; + + ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, + r535_gsp_intr, &gsp->subdev.inth); + if (WARN_ON(ret)) + return ret; + + nvkm_inth_allow(&gsp->subdev.inth); + nvkm_wr32(device, 0x110004, 0x00000040); + + /* Release the DMA buffers that were needed only for boot and init */ + nvkm_gsp_mem_dtor(&gsp->boot.fw); + nvkm_gsp_mem_dtor(&gsp->libos); + + return ret; +} + +static int +r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) +{ + rpc_unloading_guest_driver_v1F_07 *rpc; + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + if (suspend) { + rpc->bInPMTransition = 1; + rpc->bGc6Entering = 0; + rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + } else { + rpc->bInPMTransition = 0; + rpc->bGc6Entering = 0; + rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; + } + + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); +} + +enum registry_type { + REGISTRY_TABLE_ENTRY_TYPE_DWORD = 1, /* 32-bit unsigned integer */ + REGISTRY_TABLE_ENTRY_TYPE_BINARY = 2, /* Binary blob */ + REGISTRY_TABLE_ENTRY_TYPE_STRING = 3, /* Null-terminated string */ +}; + +/* An arbitrary limit to the length of a registry key */ +#define REGISTRY_MAX_KEY_LENGTH 64 + +/** + * struct registry_list_entry - linked list member for a registry key/value + * @head: list_head struct + * @type: dword, binary, or string + * @klen: the length of name of the key + * @vlen: the length of the value + * @key: the key name + * @dword: the data, if REGISTRY_TABLE_ENTRY_TYPE_DWORD + * @binary: the data, if TYPE_BINARY or TYPE_STRING + * + * Every registry key/value is represented internally by this struct. + * + * Type DWORD is a simple 32-bit unsigned integer, and its value is stored in + * @dword. + * + * Types BINARY and STRING are variable-length binary blobs. The only real + * difference between BINARY and STRING is that STRING is null-terminated and + * is expected to contain only printable characters. + * + * Note: it is technically possible to have multiple keys with the same name + * but different types, but this is not useful since GSP-RM expects keys to + * have only one specific type. + */ +struct registry_list_entry { + struct list_head head; + enum registry_type type; + size_t klen; + char key[REGISTRY_MAX_KEY_LENGTH]; + size_t vlen; + u32 dword; /* TYPE_DWORD */ + u8 binary[] __counted_by(vlen); /* TYPE_BINARY or TYPE_STRING */ +}; + +/** + * add_registry -- adds a registry entry + * @gsp: gsp pointer + * @key: name of the registry key + * @type: type of data + * @data: pointer to value + * @length: size of data, in bytes + * + * Adds a registry key/value pair to the registry database. 
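+ * (Callers normally go through the add_registry_num() and
+ * add_registry_string() wrappers below.)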
+ * + * This function collects the registry information in a linked list. After + * all registry keys have been added, build_registry() is used to create the + * RPC data structure. + * + * registry_rpc_size is a running total of the size of all registry keys. + * It's used to avoid an O(n) calculation of the size when the RPC is built. + * + * Returns 0 on success, or negative error code on error. + */ +static int add_registry(struct nvkm_gsp *gsp, const char *key, + enum registry_type type, const void *data, size_t length) +{ + struct registry_list_entry *reg; + const size_t nlen = strnlen(key, REGISTRY_MAX_KEY_LENGTH) + 1; + size_t alloc_size; /* extra bytes to alloc for binary or string value */ + + if (nlen > REGISTRY_MAX_KEY_LENGTH) + return -EINVAL; + + alloc_size = (type == REGISTRY_TABLE_ENTRY_TYPE_DWORD) ? 0 : length; + + reg = kmalloc(sizeof(*reg) + alloc_size, GFP_KERNEL); + if (!reg) + return -ENOMEM; + + switch (type) { + case REGISTRY_TABLE_ENTRY_TYPE_DWORD: + reg->dword = *(const u32 *)(data); + break; + case REGISTRY_TABLE_ENTRY_TYPE_BINARY: + case REGISTRY_TABLE_ENTRY_TYPE_STRING: + memcpy(reg->binary, data, alloc_size); + break; + default: + nvkm_error(&gsp->subdev, "unrecognized registry type %u for '%s'\n", + type, key); + kfree(reg); + return -EINVAL; + } + + memcpy(reg->key, key, nlen); + reg->klen = nlen; + reg->vlen = length; + reg->type = type; + + list_add_tail(®->head, &gsp->registry_list); + gsp->registry_rpc_size += sizeof(PACKED_REGISTRY_ENTRY) + nlen + alloc_size; + + return 0; +} + +static int add_registry_num(struct nvkm_gsp *gsp, const char *key, u32 value) +{ + return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_DWORD, + &value, sizeof(u32)); +} + +static int add_registry_string(struct nvkm_gsp *gsp, const char *key, const char *value) +{ + return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_STRING, + value, strlen(value) + 1); +} + +/** + * build_registry -- create the registry RPC data + * @gsp: gsp pointer + * @registry: pointer to the RPC payload to fill + * + * After all registry key/value pairs have been added, call this function to + * build the RPC. + * + * The registry RPC looks like this: + * + * +-----------------+ + * |NvU32 size; | + * |NvU32 numEntries;| + * +-----------------+ + * +----------------------------------------+ + * |PACKED_REGISTRY_ENTRY | + * +----------------------------------------+ + * |Null-terminated key (string) for entry 0| + * +----------------------------------------+ + * |Binary/string data value for entry 0 | (only if necessary) + * +----------------------------------------+ + * + * +----------------------------------------+ + * |PACKED_REGISTRY_ENTRY | + * +----------------------------------------+ + * |Null-terminated key (string) for entry 1| + * +----------------------------------------+ + * |Binary/string data value for entry 1 | (only if necessary) + * +----------------------------------------+ + * ... (and so on, one copy for each entry) + * + * + * The 'data' field of an entry is either a 32-bit integer (for type DWORD) + * or an offset into the PACKED_REGISTRY_TABLE (for types BINARY and STRING). + * + * All memory allocated by add_registry() is released. 
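+ * On return, gsp->registry_list is empty and registry->size is set from the
+ * running total kept in gsp->registry_rpc_size (double-checked against the
+ * number of bytes actually written).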
+ */ +static void build_registry(struct nvkm_gsp *gsp, PACKED_REGISTRY_TABLE *registry) +{ + struct registry_list_entry *reg, *n; + size_t str_offset; + unsigned int i = 0; + + registry->numEntries = list_count_nodes(&gsp->registry_list); + str_offset = struct_size(registry, entries, registry->numEntries); + + list_for_each_entry_safe(reg, n, &gsp->registry_list, head) { + registry->entries[i].type = reg->type; + registry->entries[i].length = reg->vlen; + + /* Append the key name to the table */ + registry->entries[i].nameOffset = str_offset; + memcpy((void *)registry + str_offset, reg->key, reg->klen); + str_offset += reg->klen; + + switch (reg->type) { + case REGISTRY_TABLE_ENTRY_TYPE_DWORD: + registry->entries[i].data = reg->dword; + break; + case REGISTRY_TABLE_ENTRY_TYPE_BINARY: + case REGISTRY_TABLE_ENTRY_TYPE_STRING: + /* If the type is binary or string, also append the value */ + memcpy((void *)registry + str_offset, reg->binary, reg->vlen); + registry->entries[i].data = str_offset; + str_offset += reg->vlen; + break; + default: + break; + } + + i++; + list_del(®->head); + kfree(reg); + } + + /* Double-check that we calculated the sizes correctly */ + WARN_ON(gsp->registry_rpc_size != str_offset); + + registry->size = gsp->registry_rpc_size; +} + +/** + * clean_registry -- clean up registry memory in case of error + * @gsp: gsp pointer + * + * Call this function to clean up all memory allocated by add_registry() + * in case of error and build_registry() is not called. + */ +static void clean_registry(struct nvkm_gsp *gsp) +{ + struct registry_list_entry *reg, *n; + + list_for_each_entry_safe(reg, n, &gsp->registry_list, head) { + list_del(®->head); + kfree(reg); + } + + gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE); +} + +MODULE_PARM_DESC(NVreg_RegistryDwords, + "A semicolon-separated list of key=integer pairs of GSP-RM registry keys"); +static char *NVreg_RegistryDwords; +module_param(NVreg_RegistryDwords, charp, 0400); + +/* dword only */ +struct nv_gsp_registry_entries { + const char *name; + u32 value; +}; + +/* + * r535_registry_entries - required registry entries for GSP-RM + * + * This array lists registry entries that are required for GSP-RM to + * function correctly. + * + * RMSecBusResetEnable - enables PCI secondary bus reset + * RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration + * registers on any PCI reset. + */ +static const struct nv_gsp_registry_entries r535_registry_entries[] = { + { "RMSecBusResetEnable", 1 }, + { "RMForcePcieConfigSave", 1 }, +}; +#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) + +/** + * strip - strips all characters in 'reject' from 's' + * @s: string to strip + * @reject: string of characters to remove + * + * 's' is modified. + * + * Returns the length of the new string. + */ +static size_t strip(char *s, const char *reject) +{ + char *p = s, *p2 = s; + size_t length = 0; + char c; + + do { + while ((c = *p2) && strchr(reject, c)) + p2++; + + *p++ = c = *p2++; + length++; + } while (c); + + return length; +} + +/** + * r535_gsp_rpc_set_registry - build registry RPC and call GSP-RM + * @gsp: gsp pointer + * + * The GSP-RM registry is a set of key/value pairs that configure some aspects + * of GSP-RM. The keys are strings, and the values are 32-bit integers. + * + * The registry is built from a combination of a static hard-coded list (see + * above) and entries passed on the driver's command line. 
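+ *
+ * For example (illustrative values only), passing
+ *
+ *   NVreg_RegistryDwords="SomeKey=1;OtherKey=0x10"
+ *
+ * would add two DWORD entries; a value that kstrtol() cannot parse as a
+ * number is added as a STRING entry instead.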
+ */ +static int +r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) +{ + PACKED_REGISTRY_TABLE *rpc; + unsigned int i; + int ret; + + INIT_LIST_HEAD(&gsp->registry_list); + gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE); + + for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { + ret = add_registry_num(gsp, r535_registry_entries[i].name, + r535_registry_entries[i].value); + if (ret) + goto fail; + } + + /* + * The NVreg_RegistryDwords parameter is a string of key=value + * pairs separated by semicolons. We need to extract and trim each + * substring, and then parse the substring to extract the key and + * value. + */ + if (NVreg_RegistryDwords) { + char *p = kstrdup(NVreg_RegistryDwords, GFP_KERNEL); + char *start, *next = p, *equal; + + if (!p) { + ret = -ENOMEM; + goto fail; + } + + /* Remove any whitespace from the parameter string */ + strip(p, " \t\n"); + + while ((start = strsep(&next, ";"))) { + long value; + + equal = strchr(start, '='); + if (!equal || equal == start || equal[1] == 0) { + nvkm_error(&gsp->subdev, + "ignoring invalid registry string '%s'\n", + start); + continue; + } + + /* Truncate the key=value string to just key */ + *equal = 0; + + ret = kstrtol(equal + 1, 0, &value); + if (!ret) { + ret = add_registry_num(gsp, start, value); + } else { + /* Not a number, so treat it as a string */ + ret = add_registry_string(gsp, start, equal + 1); + } + + if (ret) { + nvkm_error(&gsp->subdev, + "ignoring invalid registry key/value '%s=%s'\n", + start, equal + 1); + continue; + } + } + + kfree(p); + } + + rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, gsp->registry_rpc_size); + if (IS_ERR(rpc)) { + ret = PTR_ERR(rpc); + goto fail; + } + + build_registry(gsp, rpc); + + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); + +fail: + clean_registry(gsp); + return ret; +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) +static void +r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) +{ + const guid_t NVOP_DSM_GUID = + GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, + 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); + u64 NVOP_DSM_REV = 0x00000100; + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = 4, + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + caps->status = 0xffff; + + if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) + return; + + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); + if (!obj) + return; + + if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || + WARN_ON(obj->buffer.length != 4)) + return; + + caps->status = 0; + caps->optimusCaps = *(u32 *)obj->buffer.pointer; + + ACPI_FREE(obj); + + kfree(argv4.buffer.pointer); +} + +static void +r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) +{ + const guid_t JT_DSM_GUID = + GUID_INIT(0xCBECA351L, 0x067B, 0x4924, + 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); + u64 JT_DSM_REV = 0x00000103; + u32 caps; + union acpi_object argv4 = { + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(caps), + .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), + }, *obj; + + jt->status = 0xffff; + + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); + if (!obj) + return; + + if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || + WARN_ON(obj->buffer.length != 4)) + return; + + jt->status = 0; + jt->jtCaps = *(u32 *)obj->buffer.pointer; + jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; + jt->bSBIOSCaps = 0; + + ACPI_FREE(obj); + + kfree(argv4.buffer.pointer); +} + +static 
void +r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, + MUX_METHOD_DATA_ELEMENT *part) +{ + union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &mux_arg }; + acpi_handle iter = NULL, handle_mux = NULL; + acpi_status status; + unsigned long long value; + + mode->status = 0xffff; + part->status = 0xffff; + + do { + status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); + if (ACPI_FAILURE(status) || !iter) + return; + + status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); + if (ACPI_FAILURE(status) || value != id) + continue; + + handle_mux = iter; + } while (!handle_mux); + + if (!handle_mux) + return; + + /* I -think- 0 means "acquire" according to nvidia's driver source */ + input.pointer->integer.type = ACPI_TYPE_INTEGER; + input.pointer->integer.value = 0; + + status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); + if (ACPI_SUCCESS(status)) { + mode->acpiId = id; + mode->mode = value; + mode->status = 0; + } + + status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); + if (ACPI_SUCCESS(status)) { + part->acpiId = id; + part->mode = value; + part->status = 0; + } +} + +static void +r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) +{ + mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); + + for (int i = 0; i < mux->tableLen; i++) { + r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], + &mux->acpiIdMuxPartTable[i]); + } +} + +static void +r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *_DOD; + + dod->status = 0xffff; + + status = acpi_evaluate_object(handle, "_DOD", NULL, &output); + if (ACPI_FAILURE(status)) + return; + + _DOD = output.pointer; + + if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || + WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) + return; + + for (int i = 0; i < _DOD->package.count; i++) { + if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) + return; + + dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; + dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); + } + + dod->status = 0; + kfree(output.pointer); +} +#endif + +static void +r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) +{ +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) + acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); + + if (!handle) + return; + + acpi->bValid = 1; + + r535_gsp_acpi_dod(handle, &acpi->dodMethodData); + if (acpi->dodMethodData.status == 0) + r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); + + r535_gsp_acpi_jt(handle, &acpi->jtMethodData); + r535_gsp_acpi_caps(handle, &acpi->capsMethodData); +#endif +} + +static int +r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); + GspSystemInfo *info; + + if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) + return -ENOSYS; + + info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->gpuPhysAddr = device->func->resource_addr(device, 0); + info->gpuPhysFbAddr = device->func->resource_addr(device, 1); + info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); + info->maxUserVa = TASK_SIZE; + 
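+ /* Offset and size of the GPU's PCI config space mirror. */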
info->pciConfigMirrorBase = 0x088000; + info->pciConfigMirrorSize = 0x001000; + r535_gsp_acpi_info(gsp, &info->acpiMethodData); + + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); +} + +static int +r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + rpc_os_error_log_v17_00 *msg = repv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); + return 0; +} + +static int +r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) +{ + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_chan *chan; + unsigned long flags; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", + msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, + msg->partitionAttributionId); + + chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid, &flags); + if (!chan) { + nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); + return 0; + } + + nvkm_chan_error(chan, false); + nvkm_chan_put(&chan, flags); + return 0; +} + +static int +r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + + WARN_ON(repc != 0); + + nvkm_error(subdev, "mmu fault queued\n"); + return 0; +} + +static int +r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_gsp_client *client; + struct nvkm_subdev *subdev = &gsp->subdev; + rpc_post_event_v17_00 *msg = repv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) + return -EINVAL; + + nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", + msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, + msg->status, msg->eventDataSize, msg->bNotifyList); + + mutex_lock(&gsp->client_id.mutex); + client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); + if (client) { + struct nvkm_gsp_event *event; + bool handled = false; + + list_for_each_entry(event, &client->events, head) { + if (event->object.handle == msg->hEvent) { + event->func(event, msg->eventData, msg->eventDataSize); + handled = true; + } + } + + if (!handled) { + nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", + msg->hClient, msg->hEvent); + } + } else { + nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); + } + mutex_unlock(&gsp->client_id.mutex); + return 0; +} + +/** + * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP + * @priv: gsp pointer + * @fn: function number (ignored) + * @repv: pointer to the sequencer RPC + * @repc: message size + * + * The GSP sequencer is a list of I/O commands that the GSP can send to + * the driver to perform for various purposes. The most common usage is to + * perform a special mid-initialization reset. 
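+ * + * Each entry in the command buffer is a GSP_SEQUENCER_BUFFER_CMD: one dword of opcode followed by GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opCode) dwords of payload, so the parser below advances by one dword plus the payload size for each command until it reaches seq->cmdIndex.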
+ */ +static int +r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + rpc_run_cpu_sequencer_v17_00 *seq = repv; + int ptr = 0, ret; + + nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); + + while (ptr < seq->cmdIndex) { + GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; + + ptr += 1; + ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); + + switch (cmd->opCode) { + case GSP_SEQ_BUF_OPCODE_REG_WRITE: { + u32 addr = cmd->payload.regWrite.addr; + u32 data = cmd->payload.regWrite.val; + + nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); + nvkm_wr32(device, addr, data); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { + u32 addr = cmd->payload.regModify.addr; + u32 mask = cmd->payload.regModify.mask; + u32 data = cmd->payload.regModify.val; + + nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); + nvkm_mask(device, addr, mask, data); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_POLL: { + u32 addr = cmd->payload.regPoll.addr; + u32 mask = cmd->payload.regPoll.mask; + u32 data = cmd->payload.regPoll.val; + u32 usec = cmd->payload.regPoll.timeout ?: 4000000; + //u32 error = cmd->payload.regPoll.error; + + nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); + nvkm_rd32(device, addr); + nvkm_usec(device, usec, + if ((nvkm_rd32(device, addr) & mask) == data) + break; + ); + } + break; + case GSP_SEQ_BUF_OPCODE_DELAY_US: { + u32 usec = cmd->payload.delayUs.val; + + nvkm_trace(subdev, "seq usec %d\n", usec); + udelay(usec); + } + break; + case GSP_SEQ_BUF_OPCODE_REG_STORE: { + u32 addr = cmd->payload.regStore.addr; + u32 slot = cmd->payload.regStore.index; + + seq->regSaveArea[slot] = nvkm_rd32(device, addr); + nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, + seq->regSaveArea[slot]); + } + break; + case GSP_SEQ_BUF_OPCODE_CORE_RESET: + nvkm_trace(subdev, "seq core reset\n"); + nvkm_falcon_reset(&gsp->falcon); + nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); + nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); + break; + case GSP_SEQ_BUF_OPCODE_CORE_START: + nvkm_trace(subdev, "seq core start\n"); + if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) + nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); + else + nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); + break; + case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: + nvkm_trace(subdev, "seq core wait halt\n"); + nvkm_msec(device, 2000, + if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010) + break; + ); + break; + case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { + struct nvkm_sec2 *sec2 = device->sec2; + u32 mbox0; + + nvkm_trace(subdev, "seq core resume\n"); + + ret = gsp->func->reset(gsp); + if (WARN_ON(ret)) + return ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); + nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); + + nvkm_falcon_start(&sec2->falcon); + + if (nvkm_msec(device, 2000, + if (nvkm_rd32(device, 0x1180f8) & 0x04000000) + break; + ) < 0) + return -ETIMEDOUT; + + mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040); + if (WARN_ON(mbox0)) { + nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); + return -EIO; + } + + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); + + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; + } + break; + default: + nvkm_error(subdev, "unknown sequencer 
opcode %08x\n", cmd->opCode); + return -EINVAL; + } + } + + return 0; +} + +static int +r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); + if (ret) + return ret; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; + meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; + + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->gspFwRsvdStart = gsp->fb.heap.addr; + meta->nonWprHeapOffset = gsp->fb.heap.addr; + meta->nonWprHeapSize = gsp->fb.heap.size; + meta->gspFwWprStart = gsp->fb.wpr2.addr; + meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; + meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; + meta->gspFwOffset = gsp->fb.wpr2.elf.addr; + meta->bootBinOffset = gsp->fb.wpr2.boot.addr; + meta->frtsOffset = gsp->fb.wpr2.frts.addr; + meta->frtsSize = gsp->fb.wpr2.frts.size; + meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); + meta->fbSize = gsp->fb.size; + meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->bootCount = 0; + meta->partitionRpcAddr = 0; + meta->partitionRpcRequestOffset = 0; + meta->partitionRpcReplyOffset = 0; + meta->verified = 0; + return 0; +} + +static int +r535_gsp_shared_init(struct nvkm_gsp *gsp) +{ + struct { + msgqTxHeader tx; + msgqRxHeader rx; + } *cmdq, *msgq; + int ret, i; + + gsp->shm.cmdq.size = 0x40000; + gsp->shm.msgq.size = 0x40000; + + gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT; + gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); + gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); + + ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size + + gsp->shm.cmdq.size + + gsp->shm.msgq.size, + &gsp->shm.mem); + if (ret) + return ret; + + gsp->shm.ptes.ptr = gsp->shm.mem.data; + gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size; + gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; + + for (i = 0; i < gsp->shm.ptes.nr; i++) + gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT); + + cmdq = gsp->shm.cmdq.ptr; + cmdq->tx.version = 0; + cmdq->tx.size = gsp->shm.cmdq.size; + cmdq->tx.entryOff = GSP_PAGE_SIZE; + cmdq->tx.msgSize = GSP_PAGE_SIZE; + cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; + cmdq->tx.writePtr = 0; + cmdq->tx.flags = 1; + cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); + + msgq = gsp->shm.msgq.ptr; + + gsp->cmdq.cnt = cmdq->tx.msgCount; + gsp->cmdq.wptr = &cmdq->tx.writePtr; + gsp->cmdq.rptr = &msgq->rx.readPtr; + gsp->msgq.cnt = cmdq->tx.msgCount; + gsp->msgq.wptr = &msgq->tx.writePtr; + gsp->msgq.rptr = &cmdq->rx.readPtr; + return 0; +} + +int +r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +{ + GSP_ARGUMENTS_CACHED *args; + int ret; + + if (!resume) { + ret = r535_gsp_shared_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); + if (ret) + return ret; + } + + args = gsp->rmargs.data; + args->messageQueueInitArguments.sharedMemPhysAddr = 
gsp->shm.mem.addr; + args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; + args->messageQueueInitArguments.cmdQueueOffset = + (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; + args->messageQueueInitArguments.statQueueOffset = + (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; + + if (!resume) { + args->srInitArguments.oldLevel = 0; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 0; + } else { + args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 1; + } + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +/* + * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging + * debugfs entries will be deleted, and it will not be possible to debug the + * load failure. The keep_gsp_logging parameter tells Nouveau to copy the + * logging buffers to new debugfs entries, and these entries are retained + * until the driver unloads. + */ +static bool keep_gsp_logging; +module_param(keep_gsp_logging, bool, 0444); +MODULE_PARM_DESC(keep_gsp_logging, + "Migrate the GSP-RM logging debugfs entries upon exit"); + +/* + * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine" + * data structures, and each engine has a "class ID" generated by a + * pre-processor. This is the class ID for the PMU. + */ +#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722 + +/** + * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers + * @ucode_eng_desc: the engine descriptor + * @libos_print_buf_size: the size of the libos_print_buf[] + * @libos_print_buf: the actual buffer + * + * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance + * ID". We only care about messages from PMU. + */ +struct rpc_ucode_libos_print_v1e_08 { + u32 ucode_eng_desc; + u32 libos_print_buf_size; + u8 libos_print_buf[]; +}; + +/** + * r535_gsp_msg_libos_print - capture log message from the PMU + * @priv: gsp pointer + * @fn: function number (ignored) + * @repv: pointer to libos print RPC + * @repc: message size + * + * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC + * contains the contents of the libos print buffer from PMU. It is typically + * only written to when PMU encounters an error. + * + * Technically this RPC can be used to pass print buffers from any number of + * GSP-RM engines, but we only expect to receive them for the PMU. + * + * For the PMU, the buffer is 4K in size and the RPC always contains the full + * contents. 
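+ * + * With the PMU class ID 0xf3d722 occupying bits 31:8, a PMU print RPC arrives with ucode_eng_desc == (0xf3d722 << 8) | instance, which is what the handler below checks after shifting the descriptor right by 8 bits.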
+ */ +static int +r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc) +{ + struct nvkm_gsp *gsp = priv; + struct nvkm_subdev *subdev = &gsp->subdev; + struct rpc_ucode_libos_print_v1e_08 *rpc = repv; + unsigned int class = rpc->ucode_eng_desc >> 8; + + nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n", + class, rpc->libos_print_buf_size); + + if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) { + nvkm_warn(subdev, + "received libos print from unknown class 0x%x\n", + class); + return -ENOMSG; + } + + if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) { + nvkm_error(subdev, "libos print is too large (%u bytes)\n", + rpc->libos_print_buf_size); + return -E2BIG; + } + + memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size); + + return 0; +} + +/** + * create_debugfs - create a blob debugfs entry + * @gsp: gsp pointer + * @name: name of this dentry + * @blob: blob wrapper + * + * Creates a debugfs entry for a logging buffer with the name 'name'. + */ +static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name, + struct debugfs_blob_wrapper *blob) +{ + struct dentry *dent; + + dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob); + if (IS_ERR(dent)) { + nvkm_error(&gsp->subdev, + "failed to create %s debugfs entry\n", name); + return NULL; + } + + /* + * For some reason, debugfs_create_blob doesn't set the size of the + * dentry, so do that here. See [1] + * + * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/ + */ + i_size_write(d_inode(dent), blob->size); + + return dent; +} + +/** + * r535_gsp_libos_debugfs_init - create logging debugfs entries + * @gsp: gsp pointer + * + * Create the debugfs entries. This exposes the log buffers to userspace so + * that an external tool can parse it. + * + * The 'logpmu' contains exception dumps from the PMU. It is written via an + * RPC sent from GSP-RM and must be only 4KB. We create it here because it's + * only useful if there is a debugfs entry to expose it. If we get the PMU + * logging RPC and there is no debugfs entry, the RPC is just ignored. + * + * The blob_init, blob_rm, and blob_pmu objects can't be transient + * because debugfs_create_blob doesn't copy them. + * + * NOTE: OpenRM loads the logging elf image and prints the log messages + * in real-time. We may add that capability in the future, but that + * requires loading ELF images that are not distributed with the driver and + * adding the parsing code to Nouveau. + * + * Ideally, this should be part of nouveau_debugfs_init(), but that function + * is called too late. We really want to create these debugfs entries before + * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize, + * there could still be a log to capture. + */ +static void +r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp) +{ + struct device *dev = gsp->subdev.device->dev; + + /* Create a new debugfs directory with a name unique to this GPU. 
*/ + gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root); + if (IS_ERR(gsp->debugfs.parent)) { + nvkm_error(&gsp->subdev, + "failed to create %s debugfs root\n", dev_name(dev)); + return; + } + + gsp->blob_init.data = gsp->loginit.data; + gsp->blob_init.size = gsp->loginit.size; + gsp->blob_intr.data = gsp->logintr.data; + gsp->blob_intr.size = gsp->logintr.size; + gsp->blob_rm.data = gsp->logrm.data; + gsp->blob_rm.size = gsp->logrm.size; + + gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init); + if (!gsp->debugfs.init) + goto error; + + gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr); + if (!gsp->debugfs.intr) + goto error; + + gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm); + if (!gsp->debugfs.rm) + goto error; + + /* + * Since the PMU buffer is copied from an RPC, it doesn't need to be + * a DMA buffer. + */ + gsp->blob_pmu.size = GSP_PAGE_SIZE; + gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL); + if (!gsp->blob_pmu.data) + goto error; + + gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu); + if (!gsp->debugfs.pmu) { + kfree(gsp->blob_pmu.data); + goto error; + } + + i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size); + i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size); + i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size); + i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size); + + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, + r535_gsp_msg_libos_print, gsp); + + nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n"); + + if (keep_gsp_logging) { + nvkm_info(&gsp->subdev, + "logging buffers will be retained on failure\n"); + } + + return; + +error: + debugfs_remove(gsp->debugfs.parent); + gsp->debugfs.parent = NULL; +} + +#endif + +static inline u64 +r535_gsp_libos_id8(const char *name) +{ + u64 id = 0; + + for (int i = 0; i < sizeof(id) && *name; i++, name++) + id = (id << 8) | *name; + + return id; +} + +/** + * create_pte_array() - creates a PTE array of a physically contiguous buffer + * @ptes: pointer to the array + * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned) + * @size: size of the buffer + * + * GSP-RM sometimes expects physically-contiguous buffers to have an array of + * "PTEs" for each page in that buffer. Although in theory that allows for + * the buffer to be physically discontiguous, GSP-RM does not currently + * support that. + * + * In this case, the PTEs are DMA addresses of each page of the buffer. Since + * the buffer is physically contiguous, calculating all the PTEs is simple + * math. + * + * See memdescGetPhysAddrsForGpu() + */ +static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size) +{ + unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE); + unsigned int i; + + for (i = 0; i < num_pages; i++) + ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT); +} + +/** + * r535_gsp_libos_init() -- create the libos arguments structure + * @gsp: gsp pointer + * + * The logging buffers are byte queues that contain encoded printf-like + * messages from GSP-RM. They need to be decoded by a special application + * that can parse the buffers. + * + * The 'loginit' buffer contains logs from early GSP-RM init and + * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are + * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE. 
+ * + * The physical address map for the log buffer is stored in the buffer + * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp). + * Initially, pp is equal to 0. If the buffer has valid logging data in it, + * then pp points to index into the buffer where the next logging entry will + * be written. Therefore, the logging data is valid if: + * 1 <= pp < sizeof(buffer)/sizeof(u64) + * + * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is + * configured for a larger page size (e.g. 64K pages), we need to give + * the GSP an array of 4K pages. Fortunately, since the buffer is + * physically contiguous, it's simple math to calculate the addresses. + * + * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently + * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the + * buffers to be physically contiguous anyway. + * + * The memory allocated for the arguments must remain until the GSP sends the + * init_done RPC. + * + * See _kgspInitLibosLoggingStructures (allocates memory for buffers) + * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) + */ +static int +r535_gsp_libos_init(struct nvkm_gsp *gsp) +{ + LibosMemoryRegionInitArgument *args; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); + if (ret) + return ret; + + args = gsp->libos.data; + + ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); + if (ret) + return ret; + + args[0].id8 = r535_gsp_libos_id8("LOGINIT"); + args[0].pa = gsp->loginit.addr; + args[0].size = gsp->loginit.size; + args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size); + + ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr); + if (ret) + return ret; + + args[1].id8 = r535_gsp_libos_id8("LOGINTR"); + args[1].pa = gsp->logintr.addr; + args[1].size = gsp->logintr.size; + args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size); + + ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm); + if (ret) + return ret; + + args[2].id8 = r535_gsp_libos_id8("LOGRM"); + args[2].pa = gsp->logrm.addr; + args[2].size = gsp->logrm.size; + args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size); + + ret = r535_gsp_rmargs_init(gsp, false); + if (ret) + return ret; + + args[3].id8 = r535_gsp_libos_id8("RMARGS"); + args[3].pa = gsp->rmargs.addr; + args[3].size = gsp->rmargs.size; + args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + +#ifdef CONFIG_DEBUG_FS + r535_gsp_libos_debugfs_init(gsp); +#endif + + return 0; +} + +void +nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt) +{ + struct scatterlist *sgl; + int i; + + dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); + + for_each_sgtable_sg(sgt, sgl, i) { + struct page *page = sg_page(sgl); + + __free_page(page); + } + + sg_free_table(sgt); +} + +int +nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) +{ + const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE); + struct scatterlist *sgl; + int ret, i; + + ret = sg_alloc_table(sgt, pages, GFP_KERNEL); + if (ret) + return ret; + + for_each_sgtable_sg(sgt, sgl, i) { + struct page *page = alloc_page(GFP_KERNEL); + + if (!page) 
{ + nvkm_gsp_sg_free(device, sgt); + return -ENOMEM; + } + + sg_set_page(sgl, page, PAGE_SIZE, 0); + } + + ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) + nvkm_gsp_sg_free(device, sgt); + + return ret; +} + +static void +nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) +{ + nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2); + nvkm_gsp_mem_dtor(&rx3->lvl1); + nvkm_gsp_mem_dtor(&rx3->lvl0); +} + +/** + * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list + * @gsp: gsp pointer + * @sgt: S/G list to traverse + * @size: size of the image, in bytes + * @rx3: radix3 array to update + * + * The GSP uses a three-level page table, called radix3, to map the firmware. + * Each 64-bit "pointer" in the table is either the bus address of an entry in + * the next table (for levels 0 and 1) or the bus address of the next page in + * the GSP firmware image itself. + * + * Level 0 contains a single entry in one page that points to the first page + * of level 1. + * + * Level 1, since it's also only one page in size, contains up to 512 entries, + * one for each page in Level 2. + * + * Level 2 can be up to 512 pages in size, and each of those entries points to + * the next page of the firmware image. Since there can be up to 512*512 + * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB. + * + * Internally, the GSP has its window into system memory, but the base + * physical address of the aperture is not 0. In fact, it varies depending on + * the GPU architecture. Since the GPU is a PCI device, this window is + * accessed via DMA and is therefore bound by IOMMU translation. The end + * result is that GSP-RM must translate the bus addresses in the table to GSP + * physical addresses. All this should happen transparently. 
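+ * + * For example, a 64MB firmware image covers 64MB / GSP_PAGE_SIZE = 16384 pages, so level 2 holds 16384 entries spread across 32 pages of PTEs, and level 1 holds the 32 entries pointing at those pages, comfortably within the 512-entry-per-page limit.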
+ * + * Returns 0 on success, or negative error code + * + * See kgspCreateRadix3_IMPL + */ +static int +nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, + struct nvkm_gsp_radix3 *rx3) +{ + struct sg_dma_page_iter sg_dma_iter; + struct scatterlist *sg; + size_t bufsize; + u64 *pte; + int ret, i, page_idx = 0; + + ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1); + if (ret) + goto lvl1_fail; + + // Allocate level 2 + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2); + if (ret) + goto lvl2_fail; + + // Write the bus address of level 1 to level 0 + pte = rx3->lvl0.data; + *pte = rx3->lvl1.addr; + + // Write the bus address of each page in level 2 to level 1 + pte = rx3->lvl1.data; + for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0) + *pte++ = sg_page_iter_dma_address(&sg_dma_iter); + + // Finally, write the bus address of each page in sgt to level 2 + for_each_sgtable_sg(&rx3->lvl2, sg, i) { + void *sgl_end; + + pte = sg_virt(sg); + sgl_end = (void *)pte + sg->length; + + for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) { + *pte++ = sg_page_iter_dma_address(&sg_dma_iter); + page_idx++; + + // Go to the next scatterlist for level 2 if we've reached the end + if ((void *)pte >= sgl_end) + break; + } + } + + if (ret) { +lvl2_fail: + nvkm_gsp_mem_dtor(&rx3->lvl1); +lvl1_fail: + nvkm_gsp_mem_dtor(&rx3->lvl0); + } + + return ret; +} + +int +r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + int ret; + + if (suspend) { + GspFwWprMeta *meta = gsp->wpr_meta.data; + u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; + GspFwSRMeta *sr; + + ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); + if (ret) + return ret; + + ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta); + if (ret) + return ret; + + sr = gsp->sr.meta.data; + sr->magic = GSP_FW_SR_META_MAGIC; + sr->revision = GSP_FW_SR_META_REVISION; + sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; + sr->sizeOfSuspendResumeData = len; + } + + ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); + if (WARN_ON(ret)) + return ret; + + nvkm_msec(gsp->subdev.device, 2000, + if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000) + break; + ); + + gsp->running = false; + return 0; +} + +int +r535_gsp_init(struct nvkm_gsp *gsp) +{ + int ret; + + nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); + + if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) + return -EIO; + + ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); + if (ret) + goto done; + + gsp->running = true; + +done: + if (gsp->sr.meta.data) { + nvkm_gsp_mem_dtor(&gsp->sr.meta); + nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); + return ret; + } + + if (ret == 0) + ret = r535_gsp_postinit(gsp); + + return ret; +} + +static int +r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) +{ + const struct firmware *fw = gsp->fws.bl; + const struct nvfw_bin_hdr *hdr; + RM_RISCV_UCODE_DESC *desc; + int ret; + + hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); + desc = (void *)fw->data + hdr->header_offset; + + ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); + if (ret) + return ret; + + memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); + + gsp->boot.code_offset = 
desc->monitorCodeOffset; + gsp->boot.data_offset = desc->monitorDataOffset; + gsp->boot.manifest_offset = desc->manifestOffset; + gsp->boot.app_version = desc->appVersion; + return 0; +} + +static const struct nvkm_firmware_func +r535_gsp_fw = { + .type = NVKM_FIRMWARE_IMG_SGT, +}; + +static int +r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize) +{ + const u8 *img = gsp->fws.rm->data; + const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img; + const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff]; + const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset]; + + for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { + if (!strcmp(&names[shdr->sh_name], name)) { + *pdata = &img[shdr->sh_offset]; + *psize = shdr->sh_size; + return 0; + } + } + + nvkm_error(&gsp->subdev, "section '%s' not found\n", name); + return -ENOENT; +} + +#ifdef CONFIG_DEBUG_FS + +struct r535_gsp_log { + struct nvif_log log; + + /* + * Logging buffers in debugfs. The wrapper objects need to remain + * in memory until the dentry is deleted. + */ + struct dentry *debugfs_logging_dir; + struct debugfs_blob_wrapper blob_init; + struct debugfs_blob_wrapper blob_intr; + struct debugfs_blob_wrapper blob_rm; + struct debugfs_blob_wrapper blob_pmu; +}; + +/** + * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU + * @_log: nvif_log struct for this GPU + * + * Called when the driver is shutting down, to clean up the retained GSP-RM + * logging buffers. + */ +static void r535_debugfs_shutdown(struct nvif_log *_log) +{ + struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log); + + debugfs_remove(log->debugfs_logging_dir); + + kfree(log->blob_init.data); + kfree(log->blob_intr.data); + kfree(log->blob_rm.data); + kfree(log->blob_pmu.data); + + /* We also need to delete the list object */ + kfree(log); +} + +/** + * is_empty - return true if the logging buffer was never written to + * @b: blob wrapper with ->data field pointing to logging buffer + * + * The first 64-bit field of loginit, and logintr, and logrm is the 'put' + * pointer, and it is initialized to 0. It's a dword-based index into the + * circular buffer, indicating where the next printf write will be made. + * + * If the pointer is still 0 when GSP-RM is shut down, that means that the + * buffer was never written to, so it can be ignored. + * + * This test also works for logpmu, even though it doesn't have a put pointer. + */ +static bool is_empty(const struct debugfs_blob_wrapper *b) +{ + u64 *put = b->data; + + return put ? (*put == 0) : true; +} + +/** + * r535_gsp_copy_log - preserve the logging buffers in a blob + * @parent: the top-level dentry for this GPU + * @name: name of debugfs entry to create + * @s: original wrapper object to copy from + * @t: new wrapper object to copy to + * + * When GSP shuts down, the nvkm_gsp object and all its memory is deleted. + * To preserve the logging buffers, the buffers need to be copied, but only + * if they actually have data. 
+ */ +static int r535_gsp_copy_log(struct dentry *parent, + const char *name, + const struct debugfs_blob_wrapper *s, + struct debugfs_blob_wrapper *t) +{ + struct dentry *dent; + void *p; + + if (is_empty(s)) + return 0; + + /* The original buffers will be deleted */ + p = kmemdup(s->data, s->size, GFP_KERNEL); + if (!p) + return -ENOMEM; + + t->data = p; + t->size = s->size; + + dent = debugfs_create_blob(name, 0444, parent, t); + if (IS_ERR(dent)) { + kfree(p); + memset(t, 0, sizeof(*t)); + return PTR_ERR(dent); + } + + i_size_write(d_inode(dent), t->size); + + return 0; +} + +/** + * r535_gsp_retain_logging - copy logging buffers to new debugfs root + * @gsp: gsp pointer + * + * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging + * buffers and their debugfs entries, but all those objects would normally be + * deleted if GSP-RM fails to load. + * + * To preserve the logging buffers, we need to: + * + * 1) Allocate new buffers and copy the logs into them, so that the original + * DMA buffers can be released. + * + * 2) Preserve the directories. We don't need to save single dentries because + * we're going to delete the parent directory when the driver shuts down. + * + * If anything fails in this process, then all the dentries need to be + * deleted. We don't need to deallocate the original logging buffers because + * the caller will do that regardless. + */ +static void r535_gsp_retain_logging(struct nvkm_gsp *gsp) +{ + struct device *dev = gsp->subdev.device->dev; + struct r535_gsp_log *log = NULL; + int ret; + + if (!keep_gsp_logging || !gsp->debugfs.parent) { + /* Nothing to do */ + goto exit; + } + + /* Check to make sure at least one buffer has data. */ + if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) && + is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) { + nvkm_warn(&gsp->subdev, "all logging buffers are empty\n"); + goto exit; + } + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) + goto error; + + /* + * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper + * objects are also being deleted, which means the dentries will no + * longer be valid. Delete the existing entries so that we can create + * new ones with the same name. 
+ */ + debugfs_remove(gsp->debugfs.init); + debugfs_remove(gsp->debugfs.intr); + debugfs_remove(gsp->debugfs.rm); + debugfs_remove(gsp->debugfs.pmu); + + ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init); + if (ret) + goto error; + + ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr); + if (ret) + goto error; + + ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm); + if (ret) + goto error; + + ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu); + if (ret) + goto error; + + /* The nvkm_gsp object is going away, so save the dentry */ + log->debugfs_logging_dir = gsp->debugfs.parent; + + log->log.shutdown = r535_debugfs_shutdown; + list_add(&log->log.entry, &gsp_logs.head); + + nvkm_warn(&gsp->subdev, + "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n", + dev_name(dev)); + + return; + +error: + nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n"); + +exit: + debugfs_remove(gsp->debugfs.parent); + + if (log) { + kfree(log->blob_init.data); + kfree(log->blob_intr.data); + kfree(log->blob_rm.data); + kfree(log->blob_pmu.data); + kfree(log); + } +} + +#endif + +/** + * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown + * @gsp: gsp pointer + * + * If the log buffers are exposed via debugfs, the data for those entries + * needs to be cleaned up when the GSP device shuts down. + */ +static void +r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp) +{ +#ifdef CONFIG_DEBUG_FS + r535_gsp_retain_logging(gsp); + + /* + * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that + * exists only if the debugfs entries were created. + */ + kfree(gsp->blob_pmu.data); + gsp->blob_pmu.data = NULL; +#endif +} + +void +r535_gsp_dtor(struct nvkm_gsp *gsp) +{ + idr_destroy(&gsp->client_id.idr); + mutex_destroy(&gsp->client_id.mutex); + + nvkm_gsp_radix3_dtor(gsp, &gsp->radix3); + nvkm_gsp_mem_dtor(&gsp->sig); + nvkm_firmware_dtor(&gsp->fw); + + nvkm_falcon_fw_dtor(&gsp->booter.unload); + nvkm_falcon_fw_dtor(&gsp->booter.load); + + mutex_destroy(&gsp->msgq.mutex); + mutex_destroy(&gsp->cmdq.mutex); + + nvkm_gsp_dtor_fws(gsp); + + nvkm_gsp_mem_dtor(&gsp->rmargs); + nvkm_gsp_mem_dtor(&gsp->wpr_meta); + nvkm_gsp_mem_dtor(&gsp->shm.mem); + + r535_gsp_libos_debugfs_fini(gsp); + + nvkm_gsp_mem_dtor(&gsp->loginit); + nvkm_gsp_mem_dtor(&gsp->logintr); + nvkm_gsp_mem_dtor(&gsp->logrm); +} + +int +r535_gsp_oneinit(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + const u8 *data; + u64 size; + int ret; + + mutex_init(&gsp->cmdq.mutex); + mutex_init(&gsp->msgq.mutex); + + /* Load GSP firmware from ELF image into DMA-accessible memory. */ + ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); + if (ret) + return ret; + + ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw); + if (ret) + return ret; + + /* Load relevant signature from ELF image. */ + ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig); + if (ret) + return ret; + + memcpy(gsp->sig.data, data, size); + + /* Build radix3 page table for ELF image. 
*/ + ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); + if (ret) + return ret; + + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, + r535_gsp_msg_run_cpu_sequencer, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, + r535_gsp_msg_rc_triggered, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, + r535_gsp_msg_mmu_fault_queued, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); + ret = r535_gsp_rm_boot_ctor(gsp); + if (ret) + return ret; + + /* Release FW images - we've copied them to DMA buffers now. */ + nvkm_gsp_dtor_fws(gsp); + + /* Calculate FB layout. */ + gsp->fb.wpr2.frts.size = 0x100000; + gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; + + gsp->fb.wpr2.boot.size = gsp->boot.fw.size; + gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); + + gsp->fb.wpr2.elf.size = gsp->fw.len; + gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); + + { + u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); + + gsp->fb.wpr2.heap.size = + gsp->func->wpr_heap.os_carveout_size + + gsp->func->wpr_heap.base_size + + ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + + ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); + + gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); + } + + gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); + gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); + + gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); + gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; + + gsp->fb.heap.size = 0x100000; + gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; + + ret = nvkm_gsp_fwsec_frts(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_libos_init(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_wpr_meta_init(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_rpc_set_system_info(gsp); + if (WARN_ON(ret)) + return ret; + + ret = r535_gsp_rpc_set_registry(gsp); + if (WARN_ON(ret)) + return ret; + + mutex_init(&gsp->client_id.mutex); + idr_init(&gsp->client_id.idr); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c new file mode 100644 index 000000000000..16c1928f6d68 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c @@ -0,0 +1,110 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include + +#include +#include + +struct r535_nvdec_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_nvdec_obj_dtor(struct nvkm_object *object) +{ + struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_nvdec_obj = { + .dtor = r535_nvdec_obj_dtor, +}; + +static int +r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_nvdec_obj *obj; + NV_BSP_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_nvdec_dtor(struct nvkm_engine *engine) +{ + struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); + + kfree(nvdec->engine.func); + return nvdec; +} + +int +r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) +{ + struct nvkm_engine_func *rm; + int nclass; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_nvdec_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_nvdec_obj_ctor; + } + + if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) { + kfree(rm); + return -ENOMEM; + } + + return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c new file mode 100644 index 000000000000..b6808a50c4a8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c @@ -0,0 +1,110 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include + +#include +#include + +struct r535_nvenc_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_nvenc_obj_dtor(struct nvkm_object *object) +{ + struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_nvenc_obj = { + .dtor = r535_nvenc_obj_dtor, +}; + +static int +r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_nvenc_obj *obj; + NV_MSENC_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_nvenc_dtor(struct nvkm_engine *engine) +{ + struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); + + kfree(nvenc->engine.func); + return nvenc; +} + +int +r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) +{ + struct nvkm_engine_func *rm; + int nclass; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_nvenc_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_nvenc_obj_ctor; + } + + if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) { + kfree(rm); + return -ENOMEM; + } + + return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c new file mode 100644 index 000000000000..994232b3d030 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c @@ -0,0 +1,107 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include + +#include +#include + +struct r535_nvjpg_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_nvjpg_obj_dtor(struct nvkm_object *object) +{ + struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_nvjpg_obj = { + .dtor = r535_nvjpg_obj_dtor, +}; + +static int +r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_nvjpg_obj *obj; + NV_NVJPG_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = oclass->engine->subdev.inst; + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_nvjpg_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) +{ + struct nvkm_engine_func *rm; + int nclass, ret; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_nvjpg_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_nvjpg_obj_ctor; + } + + ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c new file mode 100644 index 000000000000..200201c35f0b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c @@ -0,0 +1,107 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include +#include + +#include +#include + +struct r535_ofa_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void * +r535_ofa_obj_dtor(struct nvkm_object *object) +{ + struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +r535_ofa_obj = { + .dtor = r535_ofa_obj_dtor, +}; + +static int +r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + struct r535_ofa_obj *obj; + NV_OFA_ALLOCATION_PARAMETERS *args; + + if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object); + *pobject = &obj->object; + + args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, + sizeof(*args), &obj->rm); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + + return nvkm_gsp_rm_alloc_wr(&obj->rm, args); +} + +static void * +r535_ofa_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) +{ + struct nvkm_engine_func *rm; + int nclass, ret; + + for (nclass = 0; hw->sclass[nclass].oclass; nclass++); + + if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_ofa_dtor; + for (int i = 0; i < nclass; i++) { + rm->sclass[i].minver = hw->sclass[i].minver; + rm->sclass[i].maxver = hw->sclass[i].maxver; + rm->sclass[i].oclass = hw->sclass[i].oclass; + rm->sclass[i].ctor = r535_ofa_obj_ctor; + } + + ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c new file mode 100644 index 000000000000..94cad290e17e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -0,0 +1,123 @@ +/* + * Copyright 2023 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include + +#include +#include +#include +#include + +static int +r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +{ + NV_VASPACE_ALLOCATION_PARAMETERS *args; + int ret; + + ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp, + &vmm->rm.client, &vmm->rm.device); + if (ret) + return ret; + + args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A, + sizeof(*args), &vmm->rm.object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + + ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args); + if (ret) + return ret; + + { + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; + + mutex_lock(&vmm->mutex.vmm); + ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, + &vmm->rm.rsvd); + mutex_unlock(&vmm->mutex.vmm); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object, + NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->pageSize = 0x20000000; + ctrl->virtAddrLo = vmm->rm.rsvd->addr; + ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1; + ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 
3 : 2; + ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr; + ctrl->levels[0].size = 0x20; + ctrl->levels[0].aperture = 1; + ctrl->levels[0].pageShift = 0x2f; + ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr; + ctrl->levels[1].size = 0x1000; + ctrl->levels[1].aperture = 1; + ctrl->levels[1].pageShift = 0x26; + if (vmm->pd->pde[0]->pde[0]) { + ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; + ctrl->levels[2].size = 0x1000; + ctrl->levels[2].aperture = 1; + ctrl->levels[2].pageShift = 0x1d; + } + + ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); + } + + return ret; +} + +static void +r535_mmu_dtor(struct nvkm_mmu *mmu) +{ + kfree(mmu->func); +} + +int +r535_mmu_new(const struct nvkm_mmu_func *hw, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) +{ + struct nvkm_mmu_func *rm; + int ret; + + if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) + return -ENOMEM; + + rm->dtor = r535_mmu_dtor; + rm->dma_bits = hw->dma_bits; + rm->mmu = hw->mmu; + rm->mem = hw->mem; + rm->vmm = hw->vmm; + rm->kind = hw->kind; + rm->kind_sys = hw->kind_sys; + rm->promote_vmm = r535_mmu_promote_vmm; + + ret = nvkm_mmu_new_(rm, device, type, inst, pmmu); + if (ret) + kfree(rm); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild index 553d540f2736..06cbe19ce376 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild @@ -4,5 +4,3 @@ nvkm-y += nvkm/subdev/instmem/nv04.o nvkm-y += nvkm/subdev/instmem/nv40.o nvkm-y += nvkm/subdev/instmem/nv50.o nvkm-y += nvkm/subdev/instmem/gk20a.o - -nvkm-y += nvkm/subdev/instmem/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c deleted file mode 100644 index 35ba1798ee6e..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -struct fbsr_item { - const char *type; - u64 addr; - u64 size; - - struct list_head head; -}; - -struct fbsr { - struct list_head items; - - u64 size; - int regions; - - struct nvkm_gsp_client client; - struct nvkm_gsp_device device; - - u64 hmemory; - u64 sys_offset; -}; - -static int -fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, - u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) -{ - struct nvkm_gsp_client *client = device->object.client; - struct nvkm_gsp *gsp = client->gsp; - const u32 pages = size / GSP_PAGE_SIZE; - rpc_alloc_memory_v13_01 *rpc; - int ret; - - rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, - sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0])); - if (IS_ERR(rpc)) - return PTR_ERR(rpc); - - rpc->hClient = client->object.handle; - rpc->hDevice = device->object.handle; - rpc->hMemory = handle; - if (aper == NVKM_MEM_TARGET_HOST) { - rpc->hClass = NV01_MEMORY_LIST_SYSTEM; - rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) | - NVDEF(NVOS02, FLAGS, LOCATION, PCI) | - NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); - } else { - rpc->hClass = NV01_MEMORY_LIST_FBMEM; - rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) | - NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) | - NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP); - rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */ - } - rpc->pteAdjust = 0; - rpc->length = size; - rpc->pageCount = pages; - rpc->pteDesc.idr = 0; - rpc->pteDesc.reserved1 = 0; - rpc->pteDesc.length = pages; - - if (sgt) { - struct scatterlist *sgl; - int pte = 0, idx; - - for_each_sgtable_dma_sg(sgt, sgl, idx) { - for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++) - rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i; - - } - } else { - for (int i = 0; i < pages; i++) - rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; - } - - ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL); - if (ret) - return ret; - - object->client = device->object.client; - object->parent = &device->object; - object->handle = handle; - return 0; -} - -static int -fbsr_send(struct fbsr *fbsr, struct fbsr_item *item) -{ - NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl; - struct nvkm_gsp *gsp = fbsr->client.gsp; - struct nvkm_gsp_object memlist; - int ret; - - ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, - item->addr, item->size, NULL, &memlist); - if (ret) - return ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) { - ret = PTR_ERR(ctrl); - goto done; - } - - ctrl->fbsrType = FBSR_TYPE_DMA; - ctrl->hClient = fbsr->client.object.handle; - ctrl->hVidMem = fbsr->hmemory++; - ctrl->vidOffset = 0; - ctrl->sysOffset = fbsr->sys_offset; - ctrl->size = item->size; - - ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); -done: - nvkm_gsp_rm_free(&memlist); - if (ret) - return ret; - - fbsr->sys_offset += item->size; - return 0; -} - -static int -fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size) -{ - NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; - struct nvkm_gsp *gsp = fbsr->client.gsp; - struct nvkm_gsp_object memlist; - int ret; - - ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, - 0, fbsr->size, sgt, &memlist); - if (ret) - return ret; - - ctrl = 
nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->fbsrType = FBSR_TYPE_DMA; - ctrl->numRegions = fbsr->regions; - ctrl->hClient = fbsr->client.object.handle; - ctrl->hSysMem = fbsr->hmemory++; - ctrl->gspFbAllocsSysOffset = items_size; - - ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); - if (ret) - return ret; - - nvkm_gsp_rm_free(&memlist); - return 0; -} - -static bool -fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size) -{ - struct fbsr_item *item; - - if (!(item = kzalloc(sizeof(*item), GFP_KERNEL))) - return false; - - item->type = type; - item->addr = addr; - item->size = size; - list_add_tail(&item->head, &fbsr->items); - return true; -} - -static bool -fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) -{ - return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory)); -} - -static void -r535_instmem_resume(struct nvkm_instmem *imem) -{ - /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */ - if (imem->rm.fbsr_valid) { - nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr); - imem->rm.fbsr_valid = false; - } -} - -static int -r535_instmem_suspend(struct nvkm_instmem *imem) -{ - struct nvkm_subdev *subdev = &imem->subdev; - struct nvkm_device *device = subdev->device; - struct nvkm_gsp *gsp = device->gsp; - struct nvkm_instobj *iobj; - struct fbsr fbsr = {}; - struct fbsr_item *item, *temp; - u64 items_size; - int ret; - - INIT_LIST_HEAD(&fbsr.items); - fbsr.hmemory = 0xcaf00003; - - /* Create a list of all regions we need RM to save during suspend. */ - list_for_each_entry(iobj, &imem->list, head) { - if (iobj->preserve) { - if (!fbsr_inst(&fbsr, "inst", &iobj->memory)) - return -ENOMEM; - } - } - - list_for_each_entry(iobj, &imem->boot, head) { - if (!fbsr_inst(&fbsr, "boot", &iobj->memory)) - return -ENOMEM; - } - - if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size)) - return -ENOMEM; - - /* Determine memory requirements. */ - list_for_each_entry(item, &fbsr.items, head) { - nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n", - item->addr, item->size, item->type); - fbsr.size += item->size; - fbsr.regions++; - } - - items_size = fbsr.size; - nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size); - - fbsr.size += gsp->fb.rsvd_size; - fbsr.size += gsp->fb.bios.vga_workspace.size; - nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size); - - ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr); - if (ret) - goto done; - - /* Tell RM about the sysmem which will hold VRAM contents across suspend. */ - ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device); - if (ret) - goto done_sgt; - - ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size); - if (WARN_ON(ret)) - goto done_sgt; - - /* Send VRAM regions that need saving. */ - list_for_each_entry(item, &fbsr.items, head) { - ret = fbsr_send(&fbsr, item); - if (WARN_ON(ret)) - goto done_sgt; - } - - imem->rm.fbsr_valid = true; - - /* Cleanup everything except the sysmem backup, which will be removed after resume. */ -done_sgt: - if (ret) /* ... unless we failed already. 
*/ - nvkm_gsp_sg_free(device, &imem->rm.fbsr); -done: - list_for_each_entry_safe(item, temp, &fbsr.items, head) { - list_del(&item->head); - kfree(item); - } - - nvkm_gsp_device_dtor(&fbsr.device); - nvkm_gsp_client_dtor(&fbsr.client); - return ret; -} - -static void * -r535_instmem_dtor(struct nvkm_instmem *imem) -{ - kfree(imem->func); - return imem; -} - -int -r535_instmem_new(const struct nvkm_instmem_func *hw, - struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_instmem **pinstmem) -{ - struct nvkm_instmem_func *rm; - int ret; - - if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_instmem_dtor; - rm->fini = hw->fini; - rm->suspend = r535_instmem_suspend; - rm->resume = r535_instmem_resume; - rm->memory_new = hw->memory_new; - rm->memory_wrap = hw->memory_wrap; - rm->zero = false; - - ret = nv50_instmem_new_(rm, device, type, inst, pinstmem); - if (ret) - kfree(rm); - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild index 7ba35ea59c06..a602b0cb5b31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -16,8 +16,6 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o nvkm-y += nvkm/subdev/mmu/gv100.o nvkm-y += nvkm/subdev/mmu/tu102.o -nvkm-y += nvkm/subdev/mmu/r535.o - nvkm-y += nvkm/subdev/mmu/mem.o nvkm-y += nvkm/subdev/mmu/memnv04.o nvkm-y += nvkm/subdev/mmu/memnv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c deleted file mode 100644 index d3e95453f25d..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "vmm.h" - -#include -#include -#include -#include - -static int -r535_mmu_promote_vmm(struct nvkm_vmm *vmm) -{ - NV_VASPACE_ALLOCATION_PARAMETERS *args; - int ret; - - ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp, - &vmm->rm.client, &vmm->rm.device); - if (ret) - return ret; - - args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A, - sizeof(*args), &vmm->rm.object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; - - ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args); - if (ret) - return ret; - - { - NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; - - mutex_lock(&vmm->mutex.vmm); - ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, - &vmm->rm.rsvd); - mutex_unlock(&vmm->mutex.vmm); - if (ret) - return ret; - - ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object, - NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->pageSize = 0x20000000; - ctrl->virtAddrLo = vmm->rm.rsvd->addr; - ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1; - ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2; - ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr; - ctrl->levels[0].size = 0x20; - ctrl->levels[0].aperture = 1; - ctrl->levels[0].pageShift = 0x2f; - ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr; - ctrl->levels[1].size = 0x1000; - ctrl->levels[1].aperture = 1; - ctrl->levels[1].pageShift = 0x26; - if (vmm->pd->pde[0]->pde[0]) { - ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; - ctrl->levels[2].size = 0x1000; - ctrl->levels[2].aperture = 1; - ctrl->levels[2].pageShift = 0x1d; - } - - ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); - } - - return ret; -} - -static void -r535_mmu_dtor(struct nvkm_mmu *mmu) -{ - kfree(mmu->func); -} - -int -r535_mmu_new(const struct nvkm_mmu_func *hw, - struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_mmu **pmmu) -{ - struct nvkm_mmu_func *rm; - int ret; - - if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_mmu_dtor; - rm->dma_bits = hw->dma_bits; - rm->mmu = hw->mmu; - rm->mem = hw->mem; - rm->vmm = hw->vmm; - rm->kind = hw->kind; - rm->kind_sys = hw->kind_sys; - rm->promote_vmm = r535_mmu_promote_vmm; - - ret = nvkm_mmu_new_(rm, device, type, inst, pmmu); - if (ret) - kfree(rm); - - return ret; -} -- cgit v1.2.3 From 0c6aa94f991b00b6799be66880918b68344eb92b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: switch to a simpler GSP-RM header layout Rather than using OpenRM's directory structure for headers, move to a layout that's split roughly around RM API boundaries. Also move the headers from include/nvrm to subdev/gsp/rm/r535/nvrm, with the rest of the r535-specific code. 
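As a rough sketch of what the new layout means for a consumer (the paths and include style below are illustrative assumptions, not lines taken from this series), an r535 source file such as rm/r535/vmm.c goes from pulling in several deep OpenRM-style headers to a single per-API header kept next to the code:

    /* old layout: headers mirror the OpenRM 535.113.01 tree */
    #include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
    #include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>

    /* new layout: one header per rough RM API boundary, co-located with the code */
    #include "nvrm/vmm.h"
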
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../common/sdk/nvidia/inc/alloc/alloc_channel.h | 170 ----- .../common/sdk/nvidia/inc/class/cl0000.h | 38 - .../common/sdk/nvidia/inc/class/cl0005.h | 38 - .../common/sdk/nvidia/inc/class/cl0080.h | 43 -- .../common/sdk/nvidia/inc/class/cl2080.h | 35 - .../sdk/nvidia/inc/class/cl2080_notification.h | 62 -- .../common/sdk/nvidia/inc/class/cl84a0.h | 33 - .../common/sdk/nvidia/inc/class/cl90f1.h | 31 - .../common/sdk/nvidia/inc/class/clc0b5sw.h | 34 - .../sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h | 39 - .../sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h | 166 ----- .../sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h | 335 --------- .../nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h | 216 ------ .../sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h | 65 -- .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h | 57 -- .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h | 48 -- .../sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h | 31 - .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h | 40 - .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h | 35 - .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h | 41 -- .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h | 51 -- .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h | 52 -- .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h | 100 --- .../sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h | 41 -- .../nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h | 162 ---- .../common/sdk/nvidia/inc/ctrl/ctrl90f1.h | 95 --- .../sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h | 42 -- .../535.113.01/common/sdk/nvidia/inc/nvlimits.h | 33 - .../nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h | 148 ---- .../common/shared/msgq/inc/msgq/msgq_priv.h | 97 --- .../uproc/os/common/include/libos_init_args.h | 52 -- .../arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h | 79 -- .../arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h | 170 ----- .../nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h | 82 --- .../nvidia/arch/nvalloc/common/inc/rmgspseq.h | 100 --- .../535.113.01/nvidia/generated/g_allclasses.h | 33 - .../535.113.01/nvidia/generated/g_chipset_nvoc.h | 38 - .../nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h | 31 - .../nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h | 35 - .../nvidia/generated/g_kernel_channel_nvoc.h | 62 -- .../nvidia/generated/g_kernel_fifo_nvoc.h | 119 --- .../535.113.01/nvidia/generated/g_mem_desc_nvoc.h | 32 - .../nvrm/535.113.01/nvidia/generated/g_os_nvoc.h | 44 -- .../535.113.01/nvidia/generated/g_rpc-structures.h | 124 ---- .../535.113.01/nvidia/generated/g_sdk-structures.h | 45 -- .../nvidia/inc/kernel/gpu/gpu_acpi_data.h | 74 -- .../nvidia/inc/kernel/gpu/gpu_engine_type.h | 86 --- .../nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h | 33 - .../nvidia/inc/kernel/gpu/gsp/gsp_init_args.h | 57 -- .../nvidia/inc/kernel/gpu/gsp/gsp_static_config.h | 174 ----- .../nvidia/inc/kernel/gpu/intr/engine_idx.h | 57 -- .../535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h | 33 - .../nvidia/inc/kernel/os/nv_memory_type.h | 31 - .../nvidia/kernel/inc/vgpu/rpc_global_enums.h | 262 ------- .../nvidia/kernel/inc/vgpu/rpc_headers.h | 51 -- .../nvidia/kernel/inc/vgpu/sdk-structures.h | 40 - drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h | 2 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c | 5 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c | 6 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c | 5 +- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c | 3 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c | 5 +- 
.../drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c | 9 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 12 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c | 10 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 13 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 7 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 21 +- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c | 3 +- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c | 3 +- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c | 3 +- .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h | 36 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h | 29 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h | 15 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h | 20 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h | 21 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h | 30 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h | 741 +++++++++++++++++++ .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h | 260 +++++++ .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h | 47 ++ .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h | 106 +++ .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h | 350 +++++++++ .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h | 73 ++ .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h | 817 +++++++++++++++++++++ .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h | 53 ++ .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h | 17 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h | 17 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h | 17 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h | 16 + .../nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h | 225 ++++++ .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h | 90 +++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c | 3 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c | 3 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 5 +- 94 files changed, 3010 insertions(+), 4410 deletions(-) delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h delete mode 100644 
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h delete mode 100644 
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h deleted file mode 100644 index 7157c7757698..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h +++ /dev/null @@ -1,170 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__ -#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__ -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NV_MEMORY_DESC_PARAMS { - NV_DECLARE_ALIGNED(NvU64 base, 8); - NV_DECLARE_ALIGNED(NvU64 size, 8); - NvU32 addressSpace; - NvU32 cacheAttrib; -} NV_MEMORY_DESC_PARAMS; - -#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 -#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 -#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE -#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE - -#define NVOS04_FLAGS_VPR 2:2 -#define NVOS04_FLAGS_VPR_FALSE 0x00000000 -#define NVOS04_FLAGS_VPR_TRUE 0x00000001 - -#define NVOS04_FLAGS_CC_SECURE 2:2 -#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000 -#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 -#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 - -#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 -#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 -#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 - -#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 -#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 -#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 - -#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 -#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 -#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 -#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 - -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 - -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 -#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 -#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 -#define 
NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 -#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 - -#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 -#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 -#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 -#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 - -#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 -#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 -#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 - -#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 -#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 -#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 -#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 - -#define NVOS04_FLAGS_MAP_CHANNEL 30:30 -#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 -#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 - -#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 -#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 -#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001 - -#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U -#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U - -typedef struct NV_CHANNEL_ALLOC_PARAMS { - - NvHandle hObjectError; // error context DMA - NvHandle hObjectBuffer; // no longer used - NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO - NvU32 gpFifoEntries; // number of GP FIFO entries - - NvU32 flags; - - - NvHandle hContextShare; // context share handle - NvHandle hVASpace; // VASpace for the channel - - // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 - NvHandle hUserdMemory[NV_MAX_SUBDEVICES]; - - // offset to beginning of UserD within hUserdMemory[x] - NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8); - - // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated - NvU32 engineType; - // Channel identifier that is unique for the duration of a RM session - NvU32 cid; - // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods - NvU32 subDeviceId; - NvHandle hObjectEccError; // ECC error context DMA - - NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8); - NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8); - NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8); - NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8); - - NvHandle hPhysChannelGroup; // reserved - NvU32 internalFlags; // reserved - NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved - NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved - NvU32 ProcessID; // reserved - NvU32 SubProcessID; // reserved - // IV used for CPU-side encryption / GPU-side decryption. - NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved - // IV used for CPU-side decryption / GPU-side encryption. - NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved - // Nonce used CPU-side signing / GPU-side signature verification. 
- NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved -} NV_CHANNEL_ALLOC_PARAMS; - -typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h deleted file mode 100644 index 7a3fc023072d..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__ -#define __src_common_sdk_nvidia_inc_class_cl0000_h__ -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ - -typedef struct NV0000_ALLOC_PARAMETERS { - NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ - NvU32 processID; - char processName[NV_PROC_NAME_MAX_LENGTH]; -} NV0000_ALLOC_PARAMETERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h deleted file mode 100644 index e4de36d63666..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__ -#define __src_common_sdk_nvidia_inc_class_cl0005_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NV0005_ALLOC_PARAMETERS { - NvHandle hParentClient; - NvHandle hSrcResource; - - NvV32 hClass; - NvV32 notifyIndex; - NV_DECLARE_ALIGNED(NvP64 data, 8); -} NV0005_ALLOC_PARAMETERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h deleted file mode 100644 index 8868118e47d6..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__ -#define __src_common_sdk_nvidia_inc_class_cl0080_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */ - -typedef struct NV0080_ALLOC_PARAMETERS { - NvU32 deviceId; - NvHandle hClientShare; - NvHandle hTargetClient; - NvHandle hTargetDevice; - NvV32 flags; - NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); - NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8); - NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8); - NvV32 vaMode; -} NV0080_ALLOC_PARAMETERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h deleted file mode 100644 index 9040ea5608a0..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__ -#define __src_common_sdk_nvidia_inc_class_cl2080_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */ - -typedef struct NV2080_ALLOC_PARAMETERS { - NvU32 subDeviceId; -} NV2080_ALLOC_PARAMETERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h deleted file mode 100644 index ba659d6477d3..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__ -#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV2080_NOTIFIERS_HOTPLUG (1) - -#define NV2080_NOTIFIERS_DP_IRQ (7) - -#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) -#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS - -#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) - -#define NV2080_ENGINE_TYPE_BSP (0x00000013) -#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP - -#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) -#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ - -#define NV2080_ENGINE_TYPE_SW (0x00000022) - -#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) - -#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) -#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG - -#define NV2080_ENGINE_TYPE_OFA (0x00000033) - -typedef struct { - NvU32 plugDisplayMask; - NvU32 unplugDisplayMask; -} Nv2080HotplugNotification; - -typedef struct Nv2080DpIrqNotificationRec { - NvU32 displayId; -} Nv2080DpIrqNotification; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h deleted file mode 100644 index 9eb780a1ac72..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__ -#define __src_common_sdk_nvidia_inc_class_cl84a0_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV01_MEMORY_LIST_SYSTEM (0x00000081) - -#define NV01_MEMORY_LIST_FBMEM (0x00000082) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h deleted file mode 100644 index f1d21776e395..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__ -#define __src_common_sdk_nvidia_inc_class_cl90f1_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define FERMI_VASPACE_A (0x000090f1) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h deleted file mode 100644 index b8f32576cfaa..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__ -#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NVC0B5_ALLOCATION_PARAMETERS { - NvU32 version; - NvU32 engineType; -} NVC0B5_ALLOCATION_PARAMETERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h deleted file mode 100644 index 58b3ba7badf1..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */
-
-typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
- NvBool bDscSupported;
- NvU32 encoderColorFormatMask;
- NvU32 lineBufferSizeKB;
- NvU32 rateBufferSizeKB;
- NvU32 bitsPerPixelPrecision;
- NvU32 maxNumHztSlices;
- NvU32 lineBufferBitDepth;
-} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
deleted file mode 100644
index 596f2ea8344e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */ - -#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 flags; - NvU32 flags2; -} NV0073_CTRL_DFP_GET_INFO_PARAMS; - -#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 -#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U) -#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U) -#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U) -#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U) -#define NV0073_CTRL_DFP_FLAGS_LANE 5:3 -#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U) -#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U) -#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U) -#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6 -#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7 -#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8 -#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9 -#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10 -#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11 -#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12 -#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14 -#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15 -#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16 -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17 -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U) -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U) -#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U) -#define NV0073_CTRL_DFP_FLAGS_LINK 21:20 -#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U) -#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22 -#define 
NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23 -#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U) -#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U) -#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25 -#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26 -#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30 -#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U) - -#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U - -typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 numELDSize; - NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER]; - NvU32 maxFreqSupported; - NvU32 ctrl; - NvU32 deviceEntry; -} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS; - -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0 -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U) -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1 -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U) -#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U) - -#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvBool enable; -} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS; - -typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG; - -typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO { - NvU32 displayMask; - NvU32 sorType; -} NV0073_CTRL_DFP_ASSIGN_SOR_INFO; - -#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U - -typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU8 sorExcludeMask; - NvU32 slaveDisplayId; - NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig; - NvBool bIs2Head1Or; - NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]; - NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]; - NvU8 reservedSorMask; - NvU32 flags; -} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS; - -#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0 -#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U) -#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U) -#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1 -#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U) -#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U) - -#endif diff --git 
a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h deleted file mode 100644 index bae4b1997736..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h +++ /dev/null @@ -1,335 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__ -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U - -typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvBool bAddrOnly; - NvU32 cmd; - NvU32 addr; - NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE]; - NvU32 size; - NvU32 replyType; - NvU32 retryTimeMs; -} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS; - -#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3 -#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U) -#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U) -#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2 -#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U) -#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U) -#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0 -#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U) -#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U) -#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U) - -#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_DP_CTRL_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 cmd; - NvU32 data; - NvU32 err; - NvU32 retryTimeMs; - NvU32 eightLaneDpcdBaseAddr; -} NV0073_CTRL_DP_CTRL_PARAMS; - -#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0 -#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U) -#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1 -#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U) -#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2 -#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U) -#define NV0073_CTRL_DP_CMD_UNUSED 3:3 -#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4 -#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U) -#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U) -#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5 -#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U) -#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6 -#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U) -#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7 -#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U) -#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8 -#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U) -#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U) -#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9 -#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U) -#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10 -#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U) -#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11 -#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U) -#define 
NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U) -#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13 -#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U) -#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14 -#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U) -#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15 -#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U) - -#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29 -#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U) -#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U) -#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30 -#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U) -#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31 -#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U) -#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U) - -#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0 -#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U) -#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U) -#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U) -#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U) -#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8 -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U) -#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU) -#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18 -#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U) -#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U) -#define NV0073_CTRL_DP_DATA_TARGET 22:19 -#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U) -#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U) - -#define NV0073_CTRL_MAX_LANES 8U - -typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 numLanes; - NvU32 data[NV0073_CTRL_MAX_LANES]; -} NV0073_CTRL_DP_LANE_DATA_PARAMS; - -#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0 -#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U) -#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U) -#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U) -#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U) -#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2 -#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U) -#define 
NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U) -#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U) -#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U) - -#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 mute; -} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS; - -#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 preferredDisplayId; - - NvBool force; - NvBool useBFM; - - NvU32 displayIdAssigned; - NvU32 allDisplayMask; -} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS; - -#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; -} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS; - -#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS { - NvU32 subDeviceInstance; - NvU32 head; - NvU32 sorIndex; - NvU32 dpLink; - - NvBool bEnableOverride; - NvBool bMST; - NvU32 singleHeadMultistreamMode; - NvU32 hBlankSym; - NvU32 vBlankSym; - NvU32 colorFormat; - NvBool bEnableTwoHeadOneOr; - - struct { - NvU32 slotStart; - NvU32 slotEnd; - NvU32 PBN; - NvU32 Timeslice; - NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT - NvU32 singleHeadMSTPipeline; - NvBool bEnableAudioOverRightPanel; - } MST; - - struct { - NvBool bEnhancedFraming; - NvU32 tuSize; - NvU32 waterMark; - NvU32 actualPclkHz; // deprecated -Use MvidWarParams - NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams - NvBool bEnableAudioOverRightPanel; - struct { - NvU32 activeCnt; - NvU32 activeFrac; - NvU32 activePolarity; - NvBool mvidWarEnabled; - struct { - NvU32 actualPclkHz; - NvU32 linkClkFreqHz; - } MvidWarParams; - } Legacy; - } SST; -} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS; - -#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS { - NvU32 subDeviceInstance; -} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS; - -#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U) - -typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS { - NvU32 subDeviceInstance; - NvU32 sorIndex; - NvU32 maxLinkRate; - NvU32 dpVersionsSupported; - NvU32 
UHBRSupported; - NvBool bIsMultistreamSupported; - NvBool bIsSCEnabled; - NvBool bHasIncreasedWatermarkLimits; - NvBool bIsPC2Disabled; - NvBool isSingleHeadMSTSupported; - NvBool bFECSupported; - NvBool bIsTrainPhyRepeater; - NvBool bOverrideLinkBw; - NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC; -} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS; - -#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0 -#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1 -#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U) - -#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0 -#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U) - -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U) - -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U) -#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U) - -#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U - -typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { - // In - NvU32 subDeviceInstance; - NvU32 displayId; - NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; - - // Out - NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; - NvU8 linkBwCount; -} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; - -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0 -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U) - -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U) -#define 
NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U) -#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h deleted file mode 100644 index 954958dcf834..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h +++ /dev/null @@ -1,216 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U - -typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 bufferSize; - NvU32 flags; - NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; -} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_MAX_CONNECTORS 4U - -typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 flags; - NvU32 DDCPartners; - NvU32 count; - struct { - NvU32 index; - NvU32 type; - NvU32 location; - } data[NV0073_CTRL_MAX_CONNECTORS]; - NvU32 platform; -} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS { - NvU8 subDeviceInstance; - NvU32 displayId; - NvU8 enable; -} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS { - NvU8 subDeviceInstance; - NvU32 displayId; - NvU8 mute; -} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS { - NvU32 subDeviceInstance; - NvU32 headMask; -} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U - -typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 transmitControl; - NvU32 packetSize; - NvU32 targetHead; - NvBool bUsePsrHeadforSdp; - NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE]; -} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS; - -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U) -#define 
NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31 -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U) -#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U) - -#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 index; - NvU32 type; - NvU32 protocol; - NvU32 ditherType; - NvU32 ditherAlgo; - NvU32 location; - NvU32 rootPortId; - NvU32 dcbIndex; - NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8); - NvBool bIsLitByVbios; - NvBool bIsDispDynamic; -} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS; - -#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U) -#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U) -#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U) -#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U) - -#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U) - -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U) - -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U) -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U) -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U) -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U) -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U) -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U) -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U) - -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U) - -#define 
NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U) - -#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU) - -#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 brightness; - NvBool bUncalibrated; -} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ - -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayId; - NvU32 caps; -} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS; - -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0 -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1 -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2 -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3 -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6 -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7 -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U) -#define 
NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U) -#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h deleted file mode 100644 index d69cef3c01fd..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS { - NvU32 subDeviceInstance; - NvU32 flags; - NvU32 numHeads; -} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS; - -#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS { - NvU32 subDeviceInstance; - NvU32 displayMask; - NvU32 displayMaskDDC; -} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS; - -#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS { - NvU32 subDeviceInstance; - NvU32 flags; - NvU32 displayMask; - NvU32 retryTimeMs; -} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS; - -#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */ - -typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS { - NvU32 subDeviceInstance; - NvU32 head; - NvU32 flags; - NvU32 displayId; -} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS; - -#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h deleted file mode 100644 index 6acb3f73242d..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) -#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h deleted file mode 100644 index 3db099e62364..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { - NvU32 totalVFs; - NvU32 firstVfOffset; - NvU32 vfFeatureMask; - NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8); - NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8); - NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8); - NV_DECLARE_ALIGNED(NvU64 bar0Size, 8); - NV_DECLARE_ALIGNED(NvU64 bar1Size, 8); - NV_DECLARE_ALIGNED(NvU64 bar2Size, 8); - NvBool b64bitBar0; - NvBool b64bitBar1; - NvBool b64bitBar2; - NvBool bSriovEnabled; - NvBool bSriovHeavyEnabled; - NvBool bEmulateVFBar0TlbInvalidationRegister; - NvBool bClientRmAllocatedCtxBuffer; -} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h deleted file mode 100644 index ed01df925573..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23 - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h deleted file mode 100644 index b5b7631de99b..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { - NvU32 BoardID; - char chipSKU[4]; - char chipSKUMod[2]; - char project[5]; - char projectSKU[5]; - char CDP[6]; - char projectSKUMod[2]; - NvU32 businessCycle; -} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h deleted file mode 100644 index fe912d2bd183..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS { - NvU32 size; -} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS; - -#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h deleted file mode 100644 index 87bc4ff92ce1..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS { - NvU32 event; - NvU32 action; - NvBool bNotifyState; - NvU32 info32; - NvU16 info16; -} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS; - -#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h deleted file mode 100644 index 68c81f9f803c..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U - -typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; - -typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { - NV_DECLARE_ALIGNED(NvU64 base, 8); - NV_DECLARE_ALIGNED(NvU64 limit, 8); - NV_DECLARE_ALIGNED(NvU64 reserved, 8); - NvU32 performance; - NvBool supportCompressed; - NvBool supportISO; - NvBool bProtected; - NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; -} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; - -#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U - -typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { - NvU32 numFBRegions; - NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); -} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h deleted file mode 100644 index bc0f63699b06..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */ - -#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32 -#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16 -#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2 -#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16 - -typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY { - NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES]; - NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; - NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; - NvU32 numPbdmas; - char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN]; -} NV2080_CTRL_FIFO_DEVICE_ENTRY; - -typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS { - NvU32 baseIndex; - NvU32 numEntries; - NvBool bMore; - // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; - NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; -} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h deleted file mode 100644 index 29d7a1052142..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h +++ /dev/null @@ -1,100 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) - -#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U) - -#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U) - -typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY { - NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8); - NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8); - NV_DECLARE_ALIGNED(NvU64 size, 8); - NvU32 physAttr; - NvU16 bufferId; - NvU8 bInitialize; - NvU8 bNonmapped; -} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY; - -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U -#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U - -#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U - -#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS { - NvU32 engineType; - NvHandle hClient; - NvU32 ChID; - NvHandle hChanClient; - NvHandle hObject; - NvHandle hVirtMemory; - NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); - NV_DECLARE_ALIGNED(NvU64 size, 8); - NvU32 entryCount; - // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES]; - NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8); -} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS; - -typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS { - NvU32 gpcMask; -} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS; - -typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS { - NvU32 gpcId; - NvU32 tpcMask; -} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS; - -typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS { - NvU32 gpcId; - NvU32 zcullMask; -} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS; - -#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL) - -typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { - NvU32 index; - NvU32 flags; - NvU32 length; - NvU8 data[NV2080_GPU_MAX_GID_LENGTH]; -} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h deleted file mode 100644 index 59f8895bc5d7..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS { - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7, - NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8, -} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h deleted file mode 100644 index e11b2dbe5288..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h +++ /dev/null @@ -1,162 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS { - NvU32 feHwSysCap; - NvU32 windowPresentMask; - NvBool bFbRemapperEnabled; - NvU32 numHeads; - NvBool bPrimaryVga; - NvU32 i2cPort; - NvU32 internalDispActiveMask; -} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS; - -#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 - -#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19 - -typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { - NvU32 size; - NvU32 alignment; -} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; - -typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { - NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; -} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; - -typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { - NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; -} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO { - NvU32 engDesc; - NvU32 ctxAttr; - NvU32 ctxBufferSize; - NvU32 addrSpaceList; - NvU32 registerBase; -} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO; -#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40 - -#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS { - NvU32 numConstructedFalcons; - NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS]; -} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS { - NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8); - NV_DECLARE_ALIGNED(NvU64 instMemSize, 8); - NvU32 instMemAddrSpace; - NvU32 instMemCpuCacheAttr; -} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS { - NvU32 addressSpace; - NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8); - NV_DECLARE_ALIGNED(NvU64 limit, 8); - NvU32 cacheSnoop; - NvU32 hclass; 
- NvU32 channelInstance; - NvBool valid; -} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */ - -#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128 - -typedef enum NV2080_INTR_CATEGORY { - NV2080_INTR_CATEGORY_DEFAULT = 0, - NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1, - NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2, - NV2080_INTR_CATEGORY_RUNLIST = 3, - NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4, - NV2080_INTR_CATEGORY_UVM_OWNED = 5, - NV2080_INTR_CATEGORY_UVM_SHARED = 6, - NV2080_INTR_CATEGORY_ENUM_COUNT = 7, -} NV2080_INTR_CATEGORY; - -typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP { - NvU8 subtreeStart; - NvU8 subtreeEnd; -} NV2080_INTR_CATEGORY_SUBTREE_MAP; - -typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY { - NvU16 engineIdx; - NvU32 pmcIntrMask; - NvU32 vectorStall; - NvU32 vectorNonStall; -} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY; - -typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS { - NvU32 tableLen; - NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE]; - NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT]; -} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS { - NvU32 fbsrType; - NvU32 numRegions; - NvHandle hClient; - NvHandle hSysMem; - NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8); - NvBool bEnteringGcoffState; -} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */ - -typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS { - NvU32 fbsrType; - NvHandle hClient; - NvHandle hVidMem; - NV_DECLARE_ALIGNED(NvU64 vidOffset, 8); - NV_DECLARE_ALIGNED(NvU64 sysOffset, 8); - NV_DECLARE_ALIGNED(NvU64 size, 8); -} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS; - -#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */ - -#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */ - -typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS { - NvU32 status; - NvU16 backLightDataSize; - NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE]; -} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h deleted file mode 100644 index 977e59818533..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h +++ /dev/null @@ -1,95 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * 
SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define GMMU_FMT_MAX_LEVELS 6U - -#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */ - -typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS { - /*! - * [in] GPU sub-device handle - this API only supports unicast. - * Pass 0 to use subDeviceId instead. - */ - NvHandle hSubDevice; - - /*! - * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. - */ - NvU32 subDeviceId; - - /*! - * [in] Page size (VA coverage) of the level to reserve. - * This need not be a leaf (page table) page size - it can be - * the coverage of an arbitrary level (including root page directory). - */ - NV_DECLARE_ALIGNED(NvU64 pageSize, 8); - - /*! - * [in] First GPU virtual address of the range to reserve. - * This must be aligned to pageSize. - */ - NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8); - - /*! - * [in] Last GPU virtual address of the range to reserve. - * This (+1) must be aligned to pageSize. - */ - NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8); - - /*! - * [in] Number of PDE levels to copy. - */ - NvU32 numLevelsToCopy; - - /*! - * [in] Per-level information. - */ - struct { - /*! - * Physical address of this page level instance. - */ - NV_DECLARE_ALIGNED(NvU64 physAddress, 8); - - /*! - * Size in bytes allocated for this level instance. - */ - NV_DECLARE_ALIGNED(NvU64 size, 8); - - /*! - * Aperture in which this page level instance resides. - */ - NvU32 aperture; - - /*! 
- * Page shift corresponding to the level - */ - NvU8 pageShift; - } levels[GMMU_FMT_MAX_LEVELS]; -} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h deleted file mode 100644 index 684045796232..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__ -#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */ - -typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS { - NvBool bEnable; - NvBool bSkipSubmit; -} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS; - -#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */ - -typedef struct NVA06F_CTRL_BIND_PARAMS { - NvU32 engineType; -} NVA06F_CTRL_BIND_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h deleted file mode 100644 index 5c5a004a8031..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__ -#define __src_common_sdk_nvidia_inc_nvlimits_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV_MAX_SUBDEVICES 8 - -#define NV_PROC_NAME_MAX_LENGTH 100U - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h deleted file mode 100644 index 51b5591c603e..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h +++ /dev/null @@ -1,148 +0,0 @@ -#ifndef __src_common_sdk_nvidia_inc_nvos_h__ -#define __src_common_sdk_nvidia_inc_nvos_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NVOS02_FLAGS_PHYSICALITY 7:4 -#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000) -#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001) -#define NVOS02_FLAGS_LOCATION 11:8 -#define NVOS02_FLAGS_LOCATION_PCI (0x00000000) -#define NVOS02_FLAGS_LOCATION_AGP (0x00000001) -#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002) -#define NVOS02_FLAGS_COHERENCY 15:12 -#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000) -#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001) -#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002) -#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003) -#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004) -#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005) -#define NVOS02_FLAGS_ALLOC 17:16 -#define NVOS02_FLAGS_ALLOC_NONE (0x00000001) -#define NVOS02_FLAGS_GPU_CACHEABLE 18:18 -#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000) -#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001) - -#define NVOS02_FLAGS_KERNEL_MAPPING 19:19 -#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000) -#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001) -#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20 -#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000) -#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001) - -#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21 -#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000) -#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001) - -#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22 -#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000) -#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001) - -#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23 -#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000) -#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001) - -#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24 -#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001) - -#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25 -#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000) -#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001) -#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002) - -#define NVOS02_FLAGS_MAPPING 31:30 -#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000) -#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001) -#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002) - -#define NV01_EVENT_CLIENT_RM (0x04000000) - -typedef struct -{ - NvV32 channelInstance; // One of the n channel instances of a given channel type. - // Note that core channel has only one instance - // while all others have two (one per head). - NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer - NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications - NvU32 offset; // Initial offset for put/get, usually zero. - NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs - - NvU32 flags; -#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1 -#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000 -#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001 - -} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS; - -typedef struct -{ - NvV32 channelInstance; // One of the n channel instances of a given channel type. - // All PIO channels have two instances (one per head). - NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors. 
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel -} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS; - -typedef struct -{ - NvU32 size; - NvU32 prohibitMultipleInstances; - NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2 -} NV_BSP_ALLOCATION_PARAMETERS; - -typedef struct -{ - NvU32 size; - NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC? - NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2 -} NV_MSENC_ALLOCATION_PARAMETERS; - -typedef struct -{ - NvU32 size; - NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG? - NvU32 engineInstance; -} NV_NVJPG_ALLOCATION_PARAMETERS; - -typedef struct -{ - NvU32 size; - NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA? -} NV_OFA_ALLOCATION_PARAMETERS; - -typedef struct -{ - NvU32 index; - NvV32 flags; - NvU64 vaSize NV_ALIGN_BYTES(8); - NvU64 vaStartInternal NV_ALIGN_BYTES(8); - NvU64 vaLimitInternal NV_ALIGN_BYTES(8); - NvU32 bigPageSize; - NvU64 vaBase NV_ALIGN_BYTES(8); -} NV_VASPACE_ALLOCATION_PARAMETERS; - -#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 // unverified, 0xa0a0a0a0a0a0a0a0 -> verified -} GspFwWprMeta; - -#define GSP_FW_WPR_META_REVISION 1 -#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h deleted file mode 100644 index 4eff473e8990..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__ -#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct { - // - // Version 1 - // Version 2 - // Version 3 = for Partition boot - // Version 4 = for eb riscv boot - // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later. 
- // - NvU32 version; // structure version - NvU32 bootloaderOffset; - NvU32 bootloaderSize; - NvU32 bootloaderParamOffset; - NvU32 bootloaderParamSize; - NvU32 riscvElfOffset; - NvU32 riscvElfSize; - NvU32 appVersion; // Changelist number associated with the image - // - // Manifest contains information about Monitor and it is - // input to BR - // - NvU32 manifestOffset; - NvU32 manifestSize; - // - // Monitor Data offset within RISCV image and size - // - NvU32 monitorDataOffset; - NvU32 monitorDataSize; - // - // Monitor Code offset withtin RISCV image and size - // - NvU32 monitorCodeOffset; - NvU32 monitorCodeSize; - NvU32 bIsMonitorEnabled; - // - // Swbrom Code offset within RISCV image and size - // - NvU32 swbromCodeOffset; - NvU32 swbromCodeSize; - // - // Swbrom Data offset within RISCV image and size - // - NvU32 swbromDataOffset; - NvU32 swbromDataSize; - // - // Total size of FB carveout (image and reserved space). - // - NvU32 fbReservedSize; - // - // Indicates whether the entire RISC-V image is signed as "code" in code section. - // - NvU32 bSignedAsCode; -} RM_RISCV_UCODE_DESC; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h deleted file mode 100644 index 341ab0dbeaf2..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h +++ /dev/null @@ -1,100 +0,0 @@ -#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__ -#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef enum GSP_SEQ_BUF_OPCODE -{ - GSP_SEQ_BUF_OPCODE_REG_WRITE = 0, - GSP_SEQ_BUF_OPCODE_REG_MODIFY, - GSP_SEQ_BUF_OPCODE_REG_POLL, - GSP_SEQ_BUF_OPCODE_DELAY_US, - GSP_SEQ_BUF_OPCODE_REG_STORE, - GSP_SEQ_BUF_OPCODE_CORE_RESET, - GSP_SEQ_BUF_OPCODE_CORE_START, - GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT, - GSP_SEQ_BUF_OPCODE_CORE_RESUME, -} GSP_SEQ_BUF_OPCODE; - -#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \ - ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \ - (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? 
(sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \ - (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \ - (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \ - (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \ - /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \ - /* GSP_SEQ_BUF_OPCODE_CORE_START */ \ - /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \ - /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \ - 0) - -typedef struct -{ - NvU32 addr; - NvU32 val; -} GSP_SEQ_BUF_PAYLOAD_REG_WRITE; - -typedef struct -{ - NvU32 addr; - NvU32 mask; - NvU32 val; -} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY; - -typedef struct -{ - NvU32 addr; - NvU32 mask; - NvU32 val; - NvU32 timeout; - NvU32 error; -} GSP_SEQ_BUF_PAYLOAD_REG_POLL; - -typedef struct -{ - NvU32 val; -} GSP_SEQ_BUF_PAYLOAD_DELAY_US; - -typedef struct -{ - NvU32 addr; - NvU32 index; -} GSP_SEQ_BUF_PAYLOAD_REG_STORE; - -typedef struct GSP_SEQUENCER_BUFFER_CMD -{ - GSP_SEQ_BUF_OPCODE opCode; - union - { - GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite; - GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify; - GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll; - GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs; - GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore; - } payload; -} GSP_SEQUENCER_BUFFER_CMD; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h deleted file mode 100644 index 3144e9beac61..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __src_nvidia_generated_g_allclasses_h__ -#define __src_nvidia_generated_g_allclasses_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e) - -#define NV04_DISPLAY_COMMON (0x00000073) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h deleted file mode 100644 index 6b8921138c7d..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef __src_nvidia_generated_g_chipset_nvoc_h__ -#define __src_nvidia_generated_g_chipset_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct -{ - NvU16 deviceID; // deviceID - NvU16 vendorID; // vendorID - NvU16 subdeviceID; // subsystem deviceID - NvU16 subvendorID; // subsystem vendorID - NvU8 revisionID; // revision ID -} BUSINFO; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h deleted file mode 100644 index a5128f00225b..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__ -#define __src_nvidia_generated_g_fbsr_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest. - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h deleted file mode 100644 index 5641a21cacca..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __src_nvidia_generated_g_gpu_nvoc_h__ -#define __src_nvidia_generated_g_gpu_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef enum -{ - COMPUTE_BRANDING_TYPE_NONE, - COMPUTE_BRANDING_TYPE_TESLA, -} COMPUTE_BRANDING_TYPE; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h deleted file mode 100644 index b5ad55f854dc..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__ -#define __src_nvidia_generated_g_kernel_channel_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef enum { - /*! - * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by - * kernel CPU-RM clients. - */ - ERROR_NOTIFIER_TYPE_UNKNOWN = 0, - /*! @brief Error notifier is explicitly not set. - * - * The corresponding hErrorContext or hEccErrorContext must be - * NV01_NULL_OBJECT. - */ - ERROR_NOTIFIER_TYPE_NONE, - /*! @brief Error notifier is a ContextDma */ - ERROR_NOTIFIER_TYPE_CTXDMA, - /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */ - ERROR_NOTIFIER_TYPE_MEMORY -} ErrorNotifierType; - -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0 -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0 -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1 -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2 -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2 -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4 -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA -#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h deleted file mode 100644 index 946954ac5b3d..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h +++ /dev/null @@ -1,119 +0,0 @@ -#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__ -#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef enum -{ - /* ************************************************************************* - * Bug 3820969 - * THINK BEFORE CHANGING ENUM ORDER HERE. - * VGPU-guest uses this same ordering. Because this enum is not versioned, - * changing the order here WILL BREAK old-guest-on-newer-host compatibility. - * ************************************************************************/ - - // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc., - ENGINE_INFO_TYPE_ENG_DESC = 0, - - // HW engine ID - ENGINE_INFO_TYPE_FIFO_TAG, - - // RM_ENGINE_TYPE_* - ENGINE_INFO_TYPE_RM_ENGINE_TYPE, - - // - // runlist id (meaning varies by GPU) - // Valid only for Esched-driven engines - // - ENGINE_INFO_TYPE_RUNLIST, - - // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_* - ENGINE_INFO_TYPE_MMU_FAULT_ID, - - // ROBUST_CHANNEL_* - ENGINE_INFO_TYPE_RC_MASK, - - // Reset Bit Position. On Ampere, only valid if not _INVALID - ENGINE_INFO_TYPE_RESET, - - // Interrupt Bit Position - ENGINE_INFO_TYPE_INTR, - - // log2(MC_ENGINE_*) - ENGINE_INFO_TYPE_MC, - - // The DEV_TYPE_ENUM for this engine - ENGINE_INFO_TYPE_DEV_TYPE_ENUM, - - // The particular instance of this engine type - ENGINE_INFO_TYPE_INSTANCE_ID, - - // - // The base address for this engine's NV_RUNLIST. Valid only on Ampere+ - // Valid only for Esched-driven engines - // - ENGINE_INFO_TYPE_RUNLIST_PRI_BASE, - - // - // If this entry is a host-driven engine. - // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry. - // - ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE, - - // - // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+ - // Valid only for Esched-driven engines - // - ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID, - - // - // The base address for this engine's NV_CHRAM registers. Valid only on - // Ampere+ - // - // Valid only for Esched-driven engines - // - ENGINE_INFO_TYPE_CHRAM_PRI_BASE, - - // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM - ENGINE_INFO_TYPE_KERNEL_RM_MAX, - // Used for iterating the engine info table by the index passed. - ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX, - - // Size of FIFO_ENGINE_LIST.engineData - ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID, - - // Input-only parameter for kfifoEngineInfoXlate. 
- ENGINE_INFO_TYPE_PBDMA_ID - - /* ************************************************************************* - * Bug 3820969 - * THINK BEFORE CHANGING ENUM ORDER HERE. - * VGPU-guest uses this same ordering. Because this enum is not versioned, - * changing the order here WILL BREAK old-guest-on-newer-host compatibility. - * ************************************************************************/ -} ENGINE_INFO_TYPE; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h deleted file mode 100644 index daabaee41c87..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__ -#define __src_nvidia_generated_g_mem_desc_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define ADDR_SYSMEM 1 // System memory (PCI) -#define ADDR_FBMEM 2 // Frame buffer memory space - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h deleted file mode 100644 index 10121218f4d3..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef __src_nvidia_generated_g_os_nvoc_h__ -#define __src_nvidia_generated_g_os_nvoc_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct PACKED_REGISTRY_ENTRY -{ - NvU32 nameOffset; - NvU8 type; - NvU32 data; - NvU32 length; -} PACKED_REGISTRY_ENTRY; - -typedef struct PACKED_REGISTRY_TABLE -{ - NvU32 size; - NvU32 numEntries; - PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries); -} PACKED_REGISTRY_TABLE; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h deleted file mode 100644 index 8d925e24faea..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef __src_nvidia_generated_g_rpc_structures_h__ -#define __src_nvidia_generated_g_rpc_structures_h__ -#include -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -typedef struct rpc_alloc_memory_v13_01 -{ - NvHandle hClient; - NvHandle hDevice; - NvHandle hMemory; - NvU32 hClass; - NvU32 flags; - NvU32 pteAdjust; - NvU32 format; - NvU64 length NV_ALIGN_BYTES(8); - NvU32 pageCount; - struct pte_desc pteDesc; -} rpc_alloc_memory_v13_01; - -typedef struct rpc_free_v03_00 -{ - NVOS00_PARAMETERS_v03_00 params; -} rpc_free_v03_00; - -typedef struct rpc_unloading_guest_driver_v1F_07 -{ - NvBool bInPMTransition; - NvBool bGc6Entering; - NvU32 newLevel; -} rpc_unloading_guest_driver_v1F_07; - -typedef struct rpc_update_bar_pde_v15_00 -{ - UpdateBarPde_v15_00 info; -} rpc_update_bar_pde_v15_00; - -typedef struct rpc_gsp_rm_alloc_v03_00 -{ - NvHandle hClient; - NvHandle hParent; - NvHandle hObject; - NvU32 hClass; - NvU32 status; - NvU32 paramsSize; - NvU32 flags; - NvU8 reserved[4]; - NvU8 params[]; -} rpc_gsp_rm_alloc_v03_00; - -typedef struct rpc_gsp_rm_control_v03_00 -{ - NvHandle hClient; - NvHandle hObject; - NvU32 cmd; - NvU32 status; - NvU32 paramsSize; - NvU32 flags; - NvU8 params[]; -} rpc_gsp_rm_control_v03_00; - -typedef struct rpc_run_cpu_sequencer_v17_00 -{ - NvU32 bufferSizeDWord; - NvU32 cmdIndex; - NvU32 regSaveArea[8]; - NvU32 commandBuffer[]; -} rpc_run_cpu_sequencer_v17_00; - -typedef struct rpc_post_event_v17_00 -{ - NvHandle hClient; - NvHandle hEvent; - NvU32 notifyIndex; - NvU32 data; - NvU16 info16; - NvU32 status; - NvU32 eventDataSize; - NvBool bNotifyList; - NvU8 eventData[]; -} rpc_post_event_v17_00; - -typedef struct rpc_rc_triggered_v17_02 -{ - NvU32 nv2080EngineType; - NvU32 chid; - NvU32 exceptType; - NvU32 scope; - NvU16 partitionAttributionId; -} rpc_rc_triggered_v17_02; - -typedef struct rpc_os_error_log_v17_00 -{ - NvU32 exceptType; - NvU32 runlistId; - NvU32 chid; - char errString[0x100]; -} rpc_os_error_log_v17_00; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h deleted file mode 100644 index e9fed4140468..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef __src_nvidia_generated_g_sdk_structures_h__ -#define __src_nvidia_generated_g_sdk_structures_h__ -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -typedef struct NVOS00_PARAMETERS_v03_00 -{ - NvHandle hRoot; - NvHandle hObjectParent; - NvHandle hObjectOld; - NvV32 status; -} NVOS00_PARAMETERS_v03_00; - -typedef struct UpdateBarPde_v15_00 -{ - NV_RPC_UPDATE_PDE_BAR_TYPE barType; - NvU64 entryValue NV_ALIGN_BYTES(8); - NvU64 entryLevelShift NV_ALIGN_BYTES(8); -} UpdateBarPde_v15_00; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h deleted file mode 100644 index af50b11ec3b4..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__ -#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__ -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -typedef struct DOD_METHOD_DATA -{ - NV_STATUS status; - NvU32 acpiIdListLen; - NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; -} DOD_METHOD_DATA; - -typedef struct JT_METHOD_DATA -{ - NV_STATUS status; - NvU32 jtCaps; - NvU16 jtRevId; - NvBool bSBIOSCaps; -} JT_METHOD_DATA; - -typedef struct MUX_METHOD_DATA_ELEMENT -{ - NvU32 acpiId; - NvU32 mode; - NV_STATUS status; -} MUX_METHOD_DATA_ELEMENT; - -typedef struct MUX_METHOD_DATA -{ - NvU32 tableLen; - MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; - MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; -} MUX_METHOD_DATA; - -typedef struct CAPS_METHOD_DATA -{ - NV_STATUS status; - NvU32 optimusCaps; -} CAPS_METHOD_DATA; - -typedef struct ACPI_METHOD_DATA -{ - NvBool bValid; - DOD_METHOD_DATA dodMethodData; - JT_METHOD_DATA jtMethodData; - MUX_METHOD_DATA muxMethodData; - CAPS_METHOD_DATA capsMethodData; -} ACPI_METHOD_DATA; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h deleted file mode 100644 index e3160c60036d..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__ -#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -typedef enum -{ - RM_ENGINE_TYPE_NULL = (0x00000000), - RM_ENGINE_TYPE_GR0 = (0x00000001), - RM_ENGINE_TYPE_GR1 = (0x00000002), - RM_ENGINE_TYPE_GR2 = (0x00000003), - RM_ENGINE_TYPE_GR3 = (0x00000004), - RM_ENGINE_TYPE_GR4 = (0x00000005), - RM_ENGINE_TYPE_GR5 = (0x00000006), - RM_ENGINE_TYPE_GR6 = (0x00000007), - RM_ENGINE_TYPE_GR7 = (0x00000008), - RM_ENGINE_TYPE_COPY0 = (0x00000009), - RM_ENGINE_TYPE_COPY1 = (0x0000000a), - RM_ENGINE_TYPE_COPY2 = (0x0000000b), - RM_ENGINE_TYPE_COPY3 = (0x0000000c), - RM_ENGINE_TYPE_COPY4 = (0x0000000d), - RM_ENGINE_TYPE_COPY5 = (0x0000000e), - RM_ENGINE_TYPE_COPY6 = (0x0000000f), - RM_ENGINE_TYPE_COPY7 = (0x00000010), - RM_ENGINE_TYPE_COPY8 = (0x00000011), - RM_ENGINE_TYPE_COPY9 = (0x00000012), - RM_ENGINE_TYPE_NVDEC0 = (0x0000001d), - RM_ENGINE_TYPE_NVDEC1 = (0x0000001e), - RM_ENGINE_TYPE_NVDEC2 = (0x0000001f), - RM_ENGINE_TYPE_NVDEC3 = (0x00000020), - RM_ENGINE_TYPE_NVDEC4 = (0x00000021), - RM_ENGINE_TYPE_NVDEC5 = (0x00000022), - RM_ENGINE_TYPE_NVDEC6 = (0x00000023), - RM_ENGINE_TYPE_NVDEC7 = (0x00000024), - RM_ENGINE_TYPE_NVENC0 = (0x00000025), - RM_ENGINE_TYPE_NVENC1 = (0x00000026), - RM_ENGINE_TYPE_NVENC2 = (0x00000027), - RM_ENGINE_TYPE_VP = (0x00000028), - RM_ENGINE_TYPE_ME = (0x00000029), - RM_ENGINE_TYPE_PPP = (0x0000002a), - RM_ENGINE_TYPE_MPEG = (0x0000002b), - RM_ENGINE_TYPE_SW = (0x0000002c), - RM_ENGINE_TYPE_TSEC = (0x0000002d), - RM_ENGINE_TYPE_VIC = (0x0000002e), - RM_ENGINE_TYPE_MP = (0x0000002f), - RM_ENGINE_TYPE_SEC2 = (0x00000030), - RM_ENGINE_TYPE_HOST = (0x00000031), - RM_ENGINE_TYPE_DPU = (0x00000032), - RM_ENGINE_TYPE_PMU = (0x00000033), - RM_ENGINE_TYPE_FBFLCN = (0x00000034), - RM_ENGINE_TYPE_NVJPEG0 = (0x00000035), - RM_ENGINE_TYPE_NVJPEG1 = (0x00000036), - RM_ENGINE_TYPE_NVJPEG2 = (0x00000037), - RM_ENGINE_TYPE_NVJPEG3 = (0x00000038), - RM_ENGINE_TYPE_NVJPEG4 = (0x00000039), - RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a), - RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b), - RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c), - RM_ENGINE_TYPE_OFA = (0x0000003d), - RM_ENGINE_TYPE_LAST = (0x0000003e), -} RM_ENGINE_TYPE; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h deleted file mode 100644 index 3abec59f0cc4..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__ -#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures - -#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h deleted file mode 100644 index 4033a6f85a76..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__ -#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -typedef struct { - RmPhysAddr sharedMemPhysAddr; - NvU32 pageTableEntryCount; - NvLength cmdQueueOffset; - NvLength statQueueOffset; - NvLength locklessCmdQueueOffset; - NvLength locklessStatQueueOffset; -} MESSAGE_QUEUE_INIT_ARGUMENTS; - -typedef struct { - NvU32 oldLevel; - NvU32 flags; - NvBool bInPMTransition; -} GSP_SR_INIT_ARGUMENTS; - -typedef struct -{ - MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments; - GSP_SR_INIT_ARGUMENTS srInitArguments; - NvU32 gpuInstance; - - struct - { - NvU64 pa; - NvU64 size; - } profilerArgs; -} GSP_ARGUMENTS_CACHED; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h deleted file mode 100644 index eeab25a5e290..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h +++ /dev/null @@ -1,174 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__ -#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -typedef struct GSP_VF_INFO -{ - NvU32 totalVFs; - NvU32 firstVFOffset; - NvU64 FirstVFBar0Address; - NvU64 FirstVFBar1Address; - NvU64 FirstVFBar2Address; - NvBool b64bitBar0; - NvBool b64bitBar1; - NvBool b64bitBar2; -} GSP_VF_INFO; - -typedef struct GspSMInfo_t -{ - NvU32 version; - NvU32 regBankCount; - NvU32 regBankRegCount; - NvU32 maxWarpsPerSM; - NvU32 maxThreadsPerWarp; - NvU32 geomGsObufEntries; - NvU32 geomXbufEntries; - NvU32 maxSPPerSM; - NvU32 rtCoreCount; -} GspSMInfo; - -typedef struct GspStaticConfigInfo_t -{ - NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE]; - NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo; - NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo; - NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT]; - NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT]; - NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo; - NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams; - COMPUTE_BRANDING_TYPE computeBranding; - - NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps; - NvU32 sriovMaxGfid; - - NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX]; - - GspSMInfo SM_info; - - NvBool poisonFuseEnabled; - - NvU64 fb_length; - NvU32 fbio_mask; - NvU32 fb_bus_width; - NvU32 fb_ram_type; - NvU32 fbp_mask; - NvU32 l2_cache_size; - - NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; - NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; - - NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; - NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; - NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; - NvBool bGpuInternalSku; - NvBool bIsQuadroGeneric; - NvBool bIsQuadroAd; - NvBool bIsNvidiaNvs; - NvBool bIsVgx; - NvBool bGeforceSmb; - NvBool bIsTitan; - NvBool bIsTesla; - NvBool bIsMobile; - NvBool bIsGc6Rtd3Allowed; - NvBool bIsGcOffRtd3Allowed; - NvBool bIsGcoffLegacyAllowed; - - NvU64 bar1PdeBase; - NvU64 bar2PdeBase; - - NvBool bVbiosValid; - NvU32 vbiosSubVendor; - NvU32 vbiosSubDevice; - - NvBool bPageRetirementSupported; - - NvBool bSplitVasBetweenServerClientRm; - - NvBool bClRootportNeedsNosnoopWAR; - - VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads; - VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution; - NvU64 displaylessMaxPixels; - - // Client handle for internal RMAPI control. - NvHandle hInternalClient; - - // Device handle for internal RMAPI control. - NvHandle hInternalDevice; - - // Subdevice handle for internal RMAPI control. 
- NvHandle hInternalSubdevice; - - NvBool bSelfHostedMode; - NvBool bAtsSupported; - - NvBool bIsGpuUefi; -} GspStaticConfigInfo; - -typedef struct GspSystemInfo -{ - NvU64 gpuPhysAddr; - NvU64 gpuPhysFbAddr; - NvU64 gpuPhysInstAddr; - NvU64 nvDomainBusDeviceFunc; - NvU64 simAccessBufPhysAddr; - NvU64 pcieAtomicsOpMask; - NvU64 consoleMemSize; - NvU64 maxUserVa; - NvU32 pciConfigMirrorBase; - NvU32 pciConfigMirrorSize; - NvU8 oorArch; - NvU64 clPdbProperties; - NvU32 Chipset; - NvBool bGpuBehindBridge; - NvBool bMnocAvailable; - NvBool bUpstreamL0sUnsupported; - NvBool bUpstreamL1Unsupported; - NvBool bUpstreamL1PorSupported; - NvBool bUpstreamL1PorMobileOnly; - NvU8 upstreamAddressValid; - BUSINFO FHBBusInfo; - BUSINFO chipsetIDInfo; - ACPI_METHOD_DATA acpiMethodData; - NvU32 hypervisorType; - NvBool bIsPassthru; - NvU64 sysTimerOffsetNs; - GSP_VF_INFO gspVFInfo; -} GspSystemInfo; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h deleted file mode 100644 index bd5e01f9814b..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__ -#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define MC_ENGINE_IDX_DISP 2 - -#define MC_ENGINE_IDX_CE0 15 - -#define MC_ENGINE_IDX_CE9 24 - -#define MC_ENGINE_IDX_MSENC 38 - -#define MC_ENGINE_IDX_MSENC2 40 - -#define MC_ENGINE_IDX_GSP 49 -#define MC_ENGINE_IDX_NVJPG 50 -#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG -#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG - -#define MC_ENGINE_IDX_NVJPEG7 57 - -#define MC_ENGINE_IDX_BSP 64 -#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP -#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC - -#define MC_ENGINE_IDX_NVDEC7 71 - -#define MC_ENGINE_IDX_OFA0 80 - -#define MC_ENGINE_IDX_GR 82 -#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h deleted file mode 100644 index 366447a368bf..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__ -#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__ -#include - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NVGPU_ENGINE_CAPS_MASK_BITS 32 -#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1) - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h deleted file mode 100644 index 4a850dad4776..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__ -#define __src_nvidia_inc_kernel_os_nv_memory_type_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#define NV_MEMORY_WRITECOMBINED 2 - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h deleted file mode 100644 index 73c57f235f6a..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h +++ /dev/null @@ -1,262 +0,0 @@ -#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__ -#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -#ifndef X -# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC, -# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H -enum { -#endif - X(RM, NOP) // 0 - X(RM, SET_GUEST_SYSTEM_INFO) // 1 - X(RM, ALLOC_ROOT) // 2 - X(RM, ALLOC_DEVICE) // 3 deprecated - X(RM, ALLOC_MEMORY) // 4 - X(RM, ALLOC_CTX_DMA) // 5 - X(RM, ALLOC_CHANNEL_DMA) // 6 - X(RM, MAP_MEMORY) // 7 - X(RM, BIND_CTX_DMA) // 8 deprecated - X(RM, ALLOC_OBJECT) // 9 - X(RM, FREE) //10 - X(RM, LOG) //11 - X(RM, ALLOC_VIDMEM) //12 - X(RM, UNMAP_MEMORY) //13 - X(RM, MAP_MEMORY_DMA) //14 - X(RM, UNMAP_MEMORY_DMA) //15 - X(RM, GET_EDID) //16 - X(RM, ALLOC_DISP_CHANNEL) //17 - X(RM, ALLOC_DISP_OBJECT) //18 - X(RM, ALLOC_SUBDEVICE) //19 - X(RM, ALLOC_DYNAMIC_MEMORY) //20 - X(RM, DUP_OBJECT) //21 - X(RM, IDLE_CHANNELS) //22 - X(RM, ALLOC_EVENT) //23 - X(RM, SEND_EVENT) //24 - X(RM, REMAPPER_CONTROL) //25 deprecated - X(RM, DMA_CONTROL) //26 - X(RM, DMA_FILL_PTE_MEM) //27 - X(RM, MANAGE_HW_RESOURCE) //28 - X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated - X(RM, CREATE_FB_SEGMENT) //30 - X(RM, DESTROY_FB_SEGMENT) //31 - X(RM, ALLOC_SHARE_DEVICE) //32 - X(RM, DEFERRED_API_CONTROL) //33 - X(RM, REMOVE_DEFERRED_API) //34 - X(RM, SIM_ESCAPE_READ) //35 - X(RM, SIM_ESCAPE_WRITE) //36 - X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37 - X(RM, FREE_VIDMEM_VIRT) //38 - X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP - X(RM, PERF_GET_PERFMON_SAMPLE) //40 - X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated - X(RM, PERF_GET_LEVEL_INFO) //42 - X(RM, MAP_SEMA_MEMORY) //43 - X(RM, UNMAP_SEMA_MEMORY) //44 - X(RM, SET_SURFACE_PROPERTIES) //45 - X(RM, CLEANUP_SURFACE) //46 - X(RM, UNLOADING_GUEST_DRIVER) //47 - X(RM, TDR_SET_TIMEOUT_STATE) //48 - X(RM, SWITCH_TO_VGA) //49 - X(RM, GPU_EXEC_REG_OPS) //50 - X(RM, 
GET_STATIC_INFO) //51 - X(RM, ALLOC_VIRTMEM) //52 - X(RM, UPDATE_PDE_2) //53 - X(RM, SET_PAGE_DIRECTORY) //54 - X(RM, GET_STATIC_PSTATE_INFO) //55 - X(RM, TRANSLATE_GUEST_GPU_PTES) //56 - X(RM, RESERVED_57) //57 - X(RM, RESET_CURRENT_GR_CONTEXT) //58 - X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59 - X(RM, GET_ENGINE_UTILIZATION) //60 - X(RM, UPDATE_GPU_PDES) //61 - X(RM, GET_ENCODER_CAPACITY) //62 - X(RM, VGPU_PF_REG_READ32) //63 - X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64 - X(GSP, GET_GSP_STATIC_INFO) //65 - X(RM, RMFS_INIT) //66 - X(RM, RMFS_CLOSE_QUEUE) //67 - X(RM, RMFS_CLEANUP) //68 - X(RM, RMFS_TEST) //69 - X(RM, UPDATE_BAR_PDE) //70 - X(RM, CONTINUATION_RECORD) //71 - X(RM, GSP_SET_SYSTEM_INFO) //72 - X(RM, SET_REGISTRY) //73 - X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated - X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated - X(GSP, GSP_RM_CONTROL) //76 - X(RM, GET_STATIC_INFO2) //77 - X(RM, DUMP_PROTOBUF_COMPONENT) //78 - X(RM, UNSET_PAGE_DIRECTORY) //79 - X(RM, GET_CONSOLIDATED_STATIC_INFO) //80 - X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated - X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated - X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated - X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated - X(RM, CTRL_SET_VGPU_FB_USAGE) //85 - X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86 - X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87 - X(RM, CTRL_RESET_CHANNEL) //88 - X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89 - X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90 - X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91 - X(RM, CTRL_PERF_BOOST) //92 - X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93 - X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94 - X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95 - X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96 - X(RM, CTRL_GPFIFO_SCHEDULE) //97 - X(RM, CTRL_SET_TIMESLICE) //98 - X(RM, CTRL_PREEMPT) //99 - X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100 - X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101 - X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102 - X(GSP, GSP_RM_ALLOC) //103 - X(RM, CTRL_GET_P2P_CAPS_V2) //104 - X(RM, CTRL_CIPHER_AES_ENCRYPT) //105 - X(RM, CTRL_CIPHER_SESSION_KEY) //106 - X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107 - X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108 - X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109 - X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110 - X(RM, CTRL_GPU_PROMOTE_CTX) //111 - X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112 - X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113 - X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114 - X(RM, CTRL_GPU_INITIALIZE_CTX) //115 - X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116 - X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117 - X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118 - X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119 - X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120 - X(RM, CTRL_GET_CE_PCE_MASK) //121 - X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122 - X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123 - X(RM, CTRL_GET_NVLINK_STATUS) //124 - X(RM, CTRL_GET_P2P_CAPS) //125 - X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126 - X(RM, RESERVED_0) //127 - X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128 - X(RM, CTRL_RESERVE_HWPM_LEGACY) //129 - X(RM, CTRL_B0CC_EXEC_REG_OPS) //130 - X(RM, CTRL_BIND_PM_RESOURCES) //131 - X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132 - X(RM, CTRL_DBG_RESUME_CONTEXT) //133 - X(RM, CTRL_DBG_EXEC_REG_OPS) //134 - X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135 - X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136 - X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137 - X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138 - X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139 - X(RM, CTRL_ALLOC_PMA_STREAM) //140 - 
X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141 - X(RM, CTRL_FB_GET_INFO_V2) //142 - X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143 - X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144 - X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145 - X(RM, CTRL_GPU_EVICT_CTX) //146 - X(RM, CTRL_FB_GET_FS_INFO) //147 - X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148 - X(RM, CTRL_STOP_CHANNEL) //149 - X(RM, CTRL_GR_PC_SAMPLING_MODE) //150 - X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151 - X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152 - X(RM, CTRL_FREE_PMA_STREAM) //153 - X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154 - X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155 - X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156 - X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157 - X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158 - X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159 - X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160 - X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161 - X(UVM, UVM_PAGING_CHANNEL_MAP) //162 - X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163 - X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164 - X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165 - X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166 - X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167 - X(RM, DCE_RM_INIT) //168 - X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169 - X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170 - X(RM, GET_PLCABLE_ADDRESS_KIND) //171 - X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172 - X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173 - X(RM, CTRL_GET_MMU_DEBUG_MODE) //174 - X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175 - X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176 - X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177 - X(RM, DISABLE_CHANNELS) //178 - X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179 - X(RM, CTRL_FABRIC_MEM_STATS) //180 - X(RM, SAVE_HIBERNATION_DATA) //181 - X(RM, RESTORE_HIBERNATION_DATA) //182 - X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183 - X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184 - X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185 - X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186 - X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187 - X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188 - X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189 - X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190 - X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS) // 191 - X(RM, CTRL_BUS_SET_P2P_MAPPING) // 192 - X(RM, CTRL_BUS_UNSET_P2P_MAPPING) // 193 - X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK) // 194 - X(RM, CTRL_GPU_MIGRATABLE_OPS) // 195 - X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196 - X(RM, CTRL_GET_HS_CREDITS) // 197 - X(RM, CTRL_SET_HS_CREDITS) // 198 - X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199 - X(RM, INVALIDATE_TLB) // 200 - X(RM, NUM_FUNCTIONS) //END -#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H -}; -# undef X -# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H -#endif - -#ifndef E -# define E(RPC) NV_VGPU_MSG_EVENT_##RPC, -# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H -enum { -#endif - E(FIRST_EVENT = 0x1000) // 0x1000 - E(GSP_INIT_DONE) // 0x1001 - E(GSP_RUN_CPU_SEQUENCER) // 0x1002 - E(POST_EVENT) // 0x1003 - E(RC_TRIGGERED) // 0x1004 - E(MMU_FAULT_QUEUED) // 0x1005 - E(OS_ERROR_LOG) // 0x1006 - E(RG_LINE_INTR) // 0x1007 - E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008 - E(SIM_READ) // 0x1009 - E(SIM_WRITE) // 0x100a - E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b - E(UCODE_LIBOS_PRINT) // 0x100c - E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d - E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e - E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f - E(VGPU_CONFIG) // 0x1010 - E(DISPLAY_MODESET) // 0x1011 - E(EXTDEV_INTR_SERVICE) // 0x1012 - 
E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013 - E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014 - E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015 - E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016 - E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017 - E(TIMED_SEMAPHORE_RELEASE) // 0x1018 - E(NVLINK_IS_GPU_DEGRADED) // 0x1019 - E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a - E(GSP_SEND_USER_SHARED_DATA) // 0x101b - E(NVLINK_FAULT_UP) // 0x101c - E(GSP_LOCKDOWN_NOTICE) // 0x101d - E(MIG_CI_CONFIG_UPDATE) // 0x101e - E(NUM_EVENTS) // END -#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H -}; -# undef E -# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H -#endif - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h deleted file mode 100644 index f14b23852456..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__ -#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#define MAX_GPC_COUNT 32 - -typedef enum -{ - NV_RPC_UPDATE_PDE_BAR_1, - NV_RPC_UPDATE_PDE_BAR_2, - NV_RPC_UPDATE_PDE_BAR_INVALID, -} NV_RPC_UPDATE_PDE_BAR_TYPE; - -typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS -{ - NvU32 headIndex; - NvU32 maxHResolution; - NvU32 maxVResolution; -} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; - -typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS -{ - NvU32 numHeads; - NvU32 maxNumHeads; -} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h deleted file mode 100644 index 7801af232dff..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__ -#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__ - -/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ - -/* - * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -struct pte_desc -{ - NvU32 idr:2; - NvU32 reserved1:14; - NvU32 length:16; - union { - NvU64 pte; // PTE when IDR==0; PDE when IDR > 0 - NvU64 pde; // PTE when IDR==0; PDE when IDR > 0 - } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0 -}; - -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h index e6833df1ccc7..af11648ad9c8 100644 --- a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h +++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h @@ -21,4 +21,6 @@ typedef NvU64 NvLength; typedef NvU64 RmPhysAddr; typedef NvU32 NV_STATUS; + +typedef union {} rpc_generic_union; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c index 968fb7e01b46..46e3a29f2ad7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c @@ -21,9 +21,8 @@ */ #include -#include -#include -#include +#include "nvrm/alloc.h" +#include "nvrm/rpcfn.h" static int r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c index ce2c86c159b5..f72e2a7ac6bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c @@ -27,10 +27,8 @@ #include #include -#include -#include -#include -#include +#include "nvrm/bar.h" +#include "nvrm/rpcfn.h" static void r535_bar_flush(struct nvkm_bar *bar) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c index 0d73906f4a5a..d60003231e6d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c @@ -25,9 +25,8 @@ #include #include -#include -#include -#include +#include "nvrm/ce.h" +#include "nvrm/engine.h" struct r535_ce_obj { struct nvkm_object object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c index 7a2da37af283..c0146c00584d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c @@ -21,8 +21,7 @@ */ #include -#include -#include +#include "nvrm/client.h" static void r535_gsp_client_dtor(struct nvkm_gsp_client *client) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c index f3f0fcd22cac..70b9ee911c5e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c @@ -21,9 +21,8 @@ */ #include -#include -#include -#include +#include "nvrm/ctrl.h" +#include "nvrm/rpcfn.h" static void r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c index 09173ca1c050..094abf7b5f97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c @@ -21,13 +21,8 @@ */ #include -#include -#include -#include -#include -#include -#include -#include +#include "nvrm/device.h" +#include "nvrm/event.h" static void r535_gsp_event_dtor(struct nvkm_gsp_event *event) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 1aae15167249..475faa35361a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -36,17 +36,7 @@ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "nvrm/disp.h" #include diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c index 6305f3a93810..b2f22bd93f4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -20,18 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include - #include #include -#include -#include -#include -#include -#include -#include -#include +#include "nvrm/fbsr.h" +#include "nvrm/rpcfn.h" struct fbsr_item { const char *type; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 621e5dfe898a..594a6bbb1db2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -33,17 +33,8 @@ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "nvrm/fifo.h" +#include "nvrm/engine.h" static u32 r535_chan_doorbell_handle(struct nvkm_chan *chan) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index 37bde547ae65..ab941d808e24 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -30,12 +30,7 @@ #include -#include -#include -#include -#include -#include -#include +#include "nvrm/gr.h" #define r535_gr(p) container_of((p), struct r535_gr, base) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index f42879b2ea7e..48af5d8d22e8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -32,22 +32,11 @@ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "nvrm/gsp.h" +#include "nvrm/rpcfn.h" +#include "nvrm/msgfn.h" +#include "nvrm/event.h" +#include "nvrm/fifo.h" #include #include diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c index 16c1928f6d68..05d0916d199e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c @@ -25,8 +25,7 @@ #include #include -#include -#include +#include "nvrm/nvdec.h" struct r535_nvdec_obj { struct nvkm_object object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c index b6808a50c4a8..dcf80d1f1e9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c @@ -25,8 +25,7 @@ #include #include -#include -#include +#include "nvrm/nvenc.h" struct r535_nvenc_obj { struct nvkm_object object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c index 994232b3d030..8a8d7becba93 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c @@ -25,8 +25,7 @@ #include #include -#include -#include +#include "nvrm/nvjpg.h" struct r535_nvjpg_obj { struct nvkm_object object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h new file mode 100644 index 000000000000..cbc7e611fbda --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_ALLOC_H__ +#define __NVRM_ALLOC_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct rpc_gsp_rm_alloc_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvU32 hClass; + NvU32 status; + NvU32 paramsSize; + NvU32 flags; + NvU8 reserved[4]; + NvU8 params[]; +} rpc_gsp_rm_alloc_v03_00; + +typedef struct NVOS00_PARAMETERS_v03_00 +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS_v03_00; + +typedef struct rpc_free_v03_00 +{ + NVOS00_PARAMETERS_v03_00 params; +} rpc_free_v03_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h new file mode 100644 index 000000000000..60b0b08491ee --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_BAR_H__ +#define __NVRM_BAR_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef enum +{ + NV_RPC_UPDATE_PDE_BAR_1, + NV_RPC_UPDATE_PDE_BAR_2, + NV_RPC_UPDATE_PDE_BAR_INVALID, +} NV_RPC_UPDATE_PDE_BAR_TYPE; + +typedef struct UpdateBarPde_v15_00 +{ + NV_RPC_UPDATE_PDE_BAR_TYPE barType; + NvU64 entryValue NV_ALIGN_BYTES(8); + NvU64 entryLevelShift NV_ALIGN_BYTES(8); +} UpdateBarPde_v15_00; + +typedef struct rpc_update_bar_pde_v15_00 +{ + UpdateBarPde_v15_00 info; +} rpc_update_bar_pde_v15_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h new file mode 100644 index 000000000000..90b0325203d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_CE_H__ +#define __NVRM_CE_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct NVC0B5_ALLOCATION_PARAMETERS { + NvU32 version; + NvU32 engineType; +} NVC0B5_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h new file mode 100644 index 000000000000..df0e63c0cb6b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_CLIENT_H__ +#define __NVRM_CLIENT_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV_PROC_NAME_MAX_LENGTH 100U + +typedef struct NV0000_ALLOC_PARAMETERS { + NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ + NvU32 processID; + char processName[NV_PROC_NAME_MAX_LENGTH]; +} NV0000_ALLOC_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h new file mode 100644 index 000000000000..77f10acd82c9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_CTRL_H__ +#define __NVRM_CTRL_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct rpc_gsp_rm_control_v03_00 +{ + NvHandle hClient; + NvHandle hObject; + NvU32 cmd; + NvU32 status; + NvU32 paramsSize; + NvU32 flags; + NvU8 params[]; +} rpc_gsp_rm_control_v03_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h new file mode 100644 index 000000000000..3933b9ad61ce --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_DEVICE_H__ +#define __NVRM_DEVICE_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */ + +typedef struct NV0080_ALLOC_PARAMETERS { + NvU32 deviceId; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvV32 flags; + NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); + NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8); + NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8); + NvV32 vaMode; +} NV0080_ALLOC_PARAMETERS; + +#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */ + +typedef struct NV2080_ALLOC_PARAMETERS { + NvU32 subDeviceId; +} NV2080_ALLOC_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h new file mode 100644 index 000000000000..7b7539639540 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h @@ -0,0 +1,741 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_DISP_H__ +#define __NVRM_DISP_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 instMemSize, 8); + NvU32 instMemAddrSpace; + NvU32 instMemCpuCacheAttr; +} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS; + +#define NV_MEMORY_WRITECOMBINED 2 + +#define NV04_DISPLAY_COMMON (0x00000073) + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS { + NvU32 feHwSysCap; + NvU32 windowPresentMask; + NvBool bFbRemapperEnabled; + NvU32 numHeads; + NvBool bPrimaryVga; + NvU32 i2cPort; + NvU32 internalDispActiveMask; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS; + +#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */ + +#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS { + NvU32 status; + NvU16 backLightDataSize; + NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE]; +} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS { + NvU32 subDeviceInstance; +} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 numHeads; +} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS { + NvU32 subDeviceInstance; + NvU32 headMask; +} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS; + +#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayMask; + NvU32 displayMaskDDC; +} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS; + +#define NV0073_CTRL_MAX_CONNECTORS 4U + +#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { + NvU32 
subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 DDCPartners; + NvU32 count; + struct { + NvU32 index; + NvU32 type; + NvU32 location; + } data[NV0073_CTRL_MAX_CONNECTORS]; + NvU32 platform; +} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 index; + NvU32 type; + NvU32 protocol; + NvU32 ditherType; + NvU32 ditherAlgo; + NvU32 location; + NvU32 rootPortId; + NvU32 dcbIndex; + NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8); + NvBool bIsLitByVbios; + NvBool bIsDispDynamic; +} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS; + +#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U) + +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU) + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS { + NvBool bDscSupported; + NvU32 encoderColorFormatMask; + NvU32 lineBufferSizeKB; + NvU32 rateBufferSizeKB; + NvU32 bitsPerPixelPrecision; + NvU32 maxNumHztSlices; + NvU32 lineBufferBitDepth; +} NV0073_CTRL_CMD_DSC_CAP_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 sorIndex; + NvU32 maxLinkRate; + NvU32 dpVersionsSupported; + NvU32 UHBRSupported; + NvBool bIsMultistreamSupported; + NvBool bIsSCEnabled; + NvBool bHasIncreasedWatermarkLimits; + NvBool bIsPC2Disabled; + NvBool isSingleHeadMSTSupported; + NvBool bFECSupported; + NvBool bIsTrainPhyRepeater; + NvBool bOverrideLinkBw; + NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC; +} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ +#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U) +#define 
NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U) + +#define NV2080_NOTIFIERS_HOTPLUG (1) + +typedef struct { + NvU32 plugDisplayMask; + NvU32 unplugDisplayMask; +} Nv2080HotplugNotification; + +#define NV2080_NOTIFIERS_DP_IRQ (7) + +typedef struct Nv2080DpIrqNotificationRec { + NvU32 displayId; +} Nv2080DpIrqNotification; + +#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 displayMask; + NvU32 retryTimeMs; +} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS; + +#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 flags2; +} NV0073_CTRL_DFP_GET_INFO_PARAMS; + +#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U) +#define NV0073_CTRL_DFP_FLAGS_LANE 5:3 +#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6 +#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7 +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8 +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9 +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U) +#define 
NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10 +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14 +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15 +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LINK 21:20 +#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22 +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23 +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25 +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U) + +#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 flags; + NvU32 displayId; +} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS; + +typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG; + +typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO { + NvU32 displayMask; + NvU32 sorType; +} NV0073_CTRL_DFP_ASSIGN_SOR_INFO; + +#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U + +#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */ +typedef struct 
NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 sorExcludeMask; + NvU32 slaveDisplayId; + NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig; + NvBool bIs2Head1Or; + NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]; + NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]; + NvU8 reservedSorMask; + NvU32 flags; +} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS; + +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0 +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U) +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U) +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1 +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U) + +#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 brightness; + NvBool bUncalibrated; +} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS; + +#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U + +typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 numELDSize; + NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER]; + NvU32 maxFreqSupported; + NvU32 ctrl; + NvU32 deviceEntry; +} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS; + +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0 +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1 +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U) + +#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U + +#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU32 flags; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS { + NvU8 subDeviceInstance; + NvU32 displayId; + NvU8 enable; +} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | 
NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 caps; +} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */ +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U) + +#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U + +#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 transmitControl; + NvU32 packetSize; + NvU32 targetHead; + NvBool 
bUsePsrHeadforSdp; + NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE]; +} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS; + +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U) + +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS { + NvU8 subDeviceInstance; + NvU32 displayId; + NvU8 mute; +} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS; + +#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U + +#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS { + NvU32 
subDeviceInstance; + NvU32 displayId; + NvBool bAddrOnly; + NvU32 cmd; + NvU32 addr; + NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE]; + NvU32 size; + NvU32 replyType; + NvU32 retryTimeMs; +} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS; + +#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3 +#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2 +#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0 +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U) + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { + // In + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + + // Out + NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + NvU8 linkBwCount; +} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; + +#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + NvU32 data; + NvU32 err; + NvU32 retryTimeMs; + NvU32 eightLaneDpcdBaseAddr; +} NV0073_CTRL_DP_CTRL_PARAMS; + +#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0 +#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1 +#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2 +#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_UNUSED 3:3 +#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4 +#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U) +#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5 +#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6 +#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7 +#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8 +#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U) +#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U) +#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9 +#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10 +#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO 
(0x00000000U) +#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11 +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U) +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U) +#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13 +#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14 +#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15 +#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29 +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30 +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31 +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0 +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U) + +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0 +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8 +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U) +#define 
NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU) +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18 +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U) +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U) +#define NV0073_CTRL_DP_DATA_TARGET 22:19 +#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U) + +#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_MAX_LANES 8U + +typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 numLanes; + NvU32 data[NV0073_CTRL_MAX_LANES]; +} NV0073_CTRL_DP_LANE_DATA_PARAMS; + +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0 +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U) +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U) +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U) +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2 +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U) + +#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 preferredDisplayId; + + NvBool force; + NvBool useBFM; + + NvU32 displayIdAssigned; + NvU32 allDisplayMask; +} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS; + +#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; +} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS; + +#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 sorIndex; + NvU32 dpLink; + + NvBool bEnableOverride; + NvBool bMST; + NvU32 singleHeadMultistreamMode; + NvU32 hBlankSym; + NvU32 vBlankSym; + NvU32 colorFormat; + NvBool bEnableTwoHeadOneOr; + + struct { + NvU32 slotStart; + NvU32 slotEnd; + NvU32 PBN; + NvU32 Timeslice; + NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT + NvU32 singleHeadMSTPipeline; + NvBool bEnableAudioOverRightPanel; + } MST; + + struct { + NvBool bEnhancedFraming; + NvU32 
tuSize; + NvU32 waterMark; + NvU32 actualPclkHz; // deprecated -Use MvidWarParams + NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams + NvBool bEnableAudioOverRightPanel; + struct { + NvU32 activeCnt; + NvU32 activeFrac; + NvU32 activePolarity; + NvBool mvidWarEnabled; + struct { + NvU32 actualPclkHz; + NvU32 linkClkFreqHz; + } MvidWarParams; + } Legacy; + } SST; +} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS; + +#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool enable; +} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mute; +} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS { + NvU32 addressSpace; + NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvU32 cacheSnoop; + NvU32 hclass; + NvU32 channelInstance; + NvBool valid; +} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS; + +#define ADDR_SYSMEM (1) // System memory (PCI) + +#define ADDR_FBMEM 2 // Frame buffer memory space + +typedef struct +{ + NvV32 channelInstance; // One of the n channel instances of a given channel type. + // All PIO channels have two instances (one per head). + NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors. + NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel +} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvV32 channelInstance; // One of the n channel instances of a given channel type. + // Note that core channel has only one instance + // while all others have two (one per head). + NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer + NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications + NvU32 offset; // Initial offset for put/get, usually zero. + NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs + + NvU32 flags; +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001 + +} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h new file mode 100644 index 000000000000..b26dfc8f8087 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_ENGINE_H__ +#define __NVRM_ENGINE_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define MC_ENGINE_IDX_NULL 0 // This must be 0 +#define MC_ENGINE_IDX_TMR 1 +#define MC_ENGINE_IDX_DISP 2 +#define MC_ENGINE_IDX_FB 3 +#define MC_ENGINE_IDX_FIFO 4 +#define MC_ENGINE_IDX_VIDEO 5 +#define MC_ENGINE_IDX_MD 6 +#define MC_ENGINE_IDX_BUS 7 +#define MC_ENGINE_IDX_PMGR 8 +#define MC_ENGINE_IDX_VP2 9 +#define MC_ENGINE_IDX_CIPHER 10 +#define MC_ENGINE_IDX_BIF 11 +#define MC_ENGINE_IDX_PPP 12 +#define MC_ENGINE_IDX_PRIVRING 13 +#define MC_ENGINE_IDX_PMU 14 +#define MC_ENGINE_IDX_CE0 15 +#define MC_ENGINE_IDX_CE1 16 +#define MC_ENGINE_IDX_CE2 17 +#define MC_ENGINE_IDX_CE3 18 +#define MC_ENGINE_IDX_CE4 19 +#define MC_ENGINE_IDX_CE5 20 +#define MC_ENGINE_IDX_CE6 21 +#define MC_ENGINE_IDX_CE7 22 +#define MC_ENGINE_IDX_CE8 23 +#define MC_ENGINE_IDX_CE9 24 +#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9 +#define MC_ENGINE_IDX_VIC 35 +#define MC_ENGINE_IDX_ISOHUB 36 +#define MC_ENGINE_IDX_VGPU 37 +#define MC_ENGINE_IDX_MSENC 38 +#define MC_ENGINE_IDX_MSENC1 39 +#define MC_ENGINE_IDX_MSENC2 40 +#define MC_ENGINE_IDX_C2C 41 +#define MC_ENGINE_IDX_LTC 42 +#define MC_ENGINE_IDX_FBHUB 43 +#define MC_ENGINE_IDX_HDACODEC 44 +#define MC_ENGINE_IDX_GMMU 45 +#define MC_ENGINE_IDX_SEC2 46 +#define MC_ENGINE_IDX_FSP 47 +#define MC_ENGINE_IDX_NVLINK 48 +#define MC_ENGINE_IDX_GSP 49 +#define MC_ENGINE_IDX_NVJPG 50 +#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG +#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG +#define MC_ENGINE_IDX_NVJPEG1 51 +#define MC_ENGINE_IDX_NVJPEG2 52 +#define MC_ENGINE_IDX_NVJPEG3 53 +#define MC_ENGINE_IDX_NVJPEG4 54 +#define MC_ENGINE_IDX_NVJPEG5 55 +#define MC_ENGINE_IDX_NVJPEG6 56 +#define MC_ENGINE_IDX_NVJPEG7 57 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58 +#define MC_ENGINE_IDX_ACCESS_CNTR 59 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62 +#define MC_ENGINE_IDX_INFO_FAULT 63 +#define MC_ENGINE_IDX_BSP 64 +#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP +#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC +#define MC_ENGINE_IDX_NVDEC1 65 +#define MC_ENGINE_IDX_NVDEC2 66 +#define MC_ENGINE_IDX_NVDEC3 67 +#define MC_ENGINE_IDX_NVDEC4 68 +#define MC_ENGINE_IDX_NVDEC5 69 +#define MC_ENGINE_IDX_NVDEC6 70 +#define MC_ENGINE_IDX_NVDEC7 71 +#define MC_ENGINE_IDX_CPU_DOORBELL 72 +#define MC_ENGINE_IDX_PRIV_DOORBELL 73 +#define MC_ENGINE_IDX_MMU_ECC_ERROR 74 +#define MC_ENGINE_IDX_BLG 75 +#define MC_ENGINE_IDX_PERFMON 76 +#define MC_ENGINE_IDX_BUF_RESET 77 +#define MC_ENGINE_IDX_XBAR 78 +#define MC_ENGINE_IDX_ZPW 79 +#define MC_ENGINE_IDX_OFA0 80 +#define MC_ENGINE_IDX_TEGRA 81 +#define MC_ENGINE_IDX_GR 82 +#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR +#define MC_ENGINE_IDX_GR1 83 +#define MC_ENGINE_IDX_GR2 84 +#define MC_ENGINE_IDX_GR3 85 +#define MC_ENGINE_IDX_GR4 86 +#define MC_ENGINE_IDX_GR5 87 +#define MC_ENGINE_IDX_GR6 88 +#define MC_ENGINE_IDX_GR7 89 +#define MC_ENGINE_IDX_ESCHED 90 +#define MC_ENGINE_IDX_ESCHED__SIZE 64 +#define MC_ENGINE_IDX_GR_FECS_LOG 154 +#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG +#define MC_ENGINE_IDX_GR1_FECS_LOG 155 +#define MC_ENGINE_IDX_GR2_FECS_LOG 156 +#define MC_ENGINE_IDX_GR3_FECS_LOG 157 +#define MC_ENGINE_IDX_GR4_FECS_LOG 158 +#define MC_ENGINE_IDX_GR5_FECS_LOG 159 +#define MC_ENGINE_IDX_GR6_FECS_LOG 160 +#define MC_ENGINE_IDX_GR7_FECS_LOG 161 +#define 
MC_ENGINE_IDX_TMR_SWRL 162 +#define MC_ENGINE_IDX_DISP_GSP 163 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165 +#define MC_ENGINE_IDX_PXUC 166 +#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if +#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF +#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x)) +#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x)) +#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x)) +#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x)) +#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x)) +#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x)) +#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x)) + +typedef enum +{ + RM_ENGINE_TYPE_NULL = (0x00000000), + RM_ENGINE_TYPE_GR0 = (0x00000001), + RM_ENGINE_TYPE_GR1 = (0x00000002), + RM_ENGINE_TYPE_GR2 = (0x00000003), + RM_ENGINE_TYPE_GR3 = (0x00000004), + RM_ENGINE_TYPE_GR4 = (0x00000005), + RM_ENGINE_TYPE_GR5 = (0x00000006), + RM_ENGINE_TYPE_GR6 = (0x00000007), + RM_ENGINE_TYPE_GR7 = (0x00000008), + RM_ENGINE_TYPE_COPY0 = (0x00000009), + RM_ENGINE_TYPE_COPY1 = (0x0000000a), + RM_ENGINE_TYPE_COPY2 = (0x0000000b), + RM_ENGINE_TYPE_COPY3 = (0x0000000c), + RM_ENGINE_TYPE_COPY4 = (0x0000000d), + RM_ENGINE_TYPE_COPY5 = (0x0000000e), + RM_ENGINE_TYPE_COPY6 = (0x0000000f), + RM_ENGINE_TYPE_COPY7 = (0x00000010), + RM_ENGINE_TYPE_COPY8 = (0x00000011), + RM_ENGINE_TYPE_COPY9 = (0x00000012), + RM_ENGINE_TYPE_NVDEC0 = (0x0000001d), + RM_ENGINE_TYPE_NVDEC1 = (0x0000001e), + RM_ENGINE_TYPE_NVDEC2 = (0x0000001f), + RM_ENGINE_TYPE_NVDEC3 = (0x00000020), + RM_ENGINE_TYPE_NVDEC4 = (0x00000021), + RM_ENGINE_TYPE_NVDEC5 = (0x00000022), + RM_ENGINE_TYPE_NVDEC6 = (0x00000023), + RM_ENGINE_TYPE_NVDEC7 = (0x00000024), + RM_ENGINE_TYPE_NVENC0 = (0x00000025), + RM_ENGINE_TYPE_NVENC1 = (0x00000026), + RM_ENGINE_TYPE_NVENC2 = (0x00000027), + RM_ENGINE_TYPE_VP = (0x00000028), + RM_ENGINE_TYPE_ME = (0x00000029), + RM_ENGINE_TYPE_PPP = (0x0000002a), + RM_ENGINE_TYPE_MPEG = (0x0000002b), + RM_ENGINE_TYPE_SW = (0x0000002c), + RM_ENGINE_TYPE_TSEC = (0x0000002d), + RM_ENGINE_TYPE_VIC = (0x0000002e), + RM_ENGINE_TYPE_MP = (0x0000002f), + RM_ENGINE_TYPE_SEC2 = (0x00000030), + RM_ENGINE_TYPE_HOST = (0x00000031), + RM_ENGINE_TYPE_DPU = (0x00000032), + RM_ENGINE_TYPE_PMU = (0x00000033), + RM_ENGINE_TYPE_FBFLCN = (0x00000034), + RM_ENGINE_TYPE_NVJPEG0 = (0x00000035), + RM_ENGINE_TYPE_NVJPEG1 = (0x00000036), + RM_ENGINE_TYPE_NVJPEG2 = (0x00000037), + RM_ENGINE_TYPE_NVJPEG3 = (0x00000038), + RM_ENGINE_TYPE_NVJPEG4 = (0x00000039), + RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a), + RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b), + RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c), + RM_ENGINE_TYPE_OFA = (0x0000003d), + RM_ENGINE_TYPE_LAST = (0x0000003e), +} RM_ENGINE_TYPE; + +#define NV2080_ENGINE_TYPE_NULL (0x00000000) +#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) +#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS +#define NV2080_ENGINE_TYPE_GR1 (0x00000002) +#define NV2080_ENGINE_TYPE_GR2 (0x00000003) +#define NV2080_ENGINE_TYPE_GR3 (0x00000004) +#define NV2080_ENGINE_TYPE_GR4 (0x00000005) +#define NV2080_ENGINE_TYPE_GR5 (0x00000006) +#define NV2080_ENGINE_TYPE_GR6 (0x00000007) +#define NV2080_ENGINE_TYPE_GR7 (0x00000008) +#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) +#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a) +#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b) +#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c) +#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d) +#define 
NV2080_ENGINE_TYPE_COPY5 (0x0000000e) +#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f) +#define NV2080_ENGINE_TYPE_COPY7 (0x00000010) +#define NV2080_ENGINE_TYPE_COPY8 (0x00000011) +#define NV2080_ENGINE_TYPE_COPY9 (0x00000012) +#define NV2080_ENGINE_TYPE_BSP (0x00000013) +#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP +#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014) +#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015) +#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016) +#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017) +#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018) +#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019) +#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a) +#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) +#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ +#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c) +#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d) +#define NV2080_ENGINE_TYPE_VP (0x0000001e) +#define NV2080_ENGINE_TYPE_ME (0x0000001f) +#define NV2080_ENGINE_TYPE_PPP (0x00000020) +#define NV2080_ENGINE_TYPE_MPEG (0x00000021) +#define NV2080_ENGINE_TYPE_SW (0x00000022) +#define NV2080_ENGINE_TYPE_CIPHER (0x00000023) +#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER +#define NV2080_ENGINE_TYPE_VIC (0x00000024) +#define NV2080_ENGINE_TYPE_MP (0x00000025) +#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) +#define NV2080_ENGINE_TYPE_HOST (0x00000027) +#define NV2080_ENGINE_TYPE_DPU (0x00000028) +#define NV2080_ENGINE_TYPE_PMU (0x00000029) +#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a) +#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) +#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG +#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c) +#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d) +#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e) +#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f) +#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030) +#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031) +#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032) +#define NV2080_ENGINE_TYPE_OFA (0x00000033) +#define NV2080_ENGINE_TYPE_LAST (0x0000003e) +#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff) +#define NV2080_ENGINE_TYPE_COPY_SIZE 10 +#define NV2080_ENGINE_TYPE_NVENC_SIZE 3 +#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8 +#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8 +#define NV2080_ENGINE_TYPE_GR_SIZE 8 +#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i)) +#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9)) +#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0) +#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE))) +#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0) +#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE))) +#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0) +#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE))) +#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0) +#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i)) +#define 
NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE))) +#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0) +#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST))) +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h new file mode 100644 index 000000000000..057f7220c225 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_EVENT_H__ +#define __NVRM_EVENT_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e) + +typedef struct NV0005_ALLOC_PARAMETERS { + NvHandle hParentClient; + NvHandle hSrcResource; + + NvV32 hClass; + NvV32 notifyIndex; + NV_DECLARE_ALIGNED(NvP64 data, 8); +} NV0005_ALLOC_PARAMETERS; + +#define NV01_EVENT_CLIENT_RM (0x04000000) + +#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; + NvBool bNotifyState; + NvU32 info32; + NvU16 info16; +} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +typedef struct rpc_post_event_v17_00 +{ + NvHandle hClient; + NvHandle hEvent; + NvU32 notifyIndex; + NvU32 data; + NvU16 info16; + NvU32 status; + NvU32 eventDataSize; + NvBool bNotifyList; + NvU8 eventData[]; +} rpc_post_event_v17_00; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h new file mode 100644 index 000000000000..28786ef013a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_FBSR_H__ +#define __NVRM_FBSR_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV01_MEMORY_LIST_FBMEM (0x00000082) + +#define NV01_MEMORY_LIST_SYSTEM (0x00000081) + +#define NVOS02_FLAGS_PHYSICALITY 7:4 +#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000) +#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001) +#define NVOS02_FLAGS_LOCATION 11:8 +#define NVOS02_FLAGS_LOCATION_PCI (0x00000000) +#define NVOS02_FLAGS_LOCATION_AGP (0x00000001) +#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002) +#define NVOS02_FLAGS_COHERENCY 15:12 +#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000) +#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001) +#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002) +#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003) +#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004) +#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005) +#define NVOS02_FLAGS_ALLOC 17:16 +#define NVOS02_FLAGS_ALLOC_NONE (0x00000001) +#define NVOS02_FLAGS_GPU_CACHEABLE 18:18 +#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000) +#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001) +#define NVOS02_FLAGS_KERNEL_MAPPING 19:19 +#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000) +#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20 +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001) +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21 +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001) +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22 +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001) +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23 +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000) +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001) +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24 +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001) +#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25 +#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000) +#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001) +#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002) +#define NVOS02_FLAGS_MAPPING 31:30 +#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000) +#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001) +#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002) + +struct pte_desc +{ + NvU32 idr:2; + NvU32 reserved1:14; + NvU32 length:16; + union { + NvU64 pte; // PTE when IDR==0; PDE when IDR > 0 + NvU64 pde; // PTE when IDR==0; PDE when IDR > 0 + } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0 +}; + +typedef struct rpc_alloc_memory_v13_01 +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU32 hClass; + NvU32 flags; + NvU32 pteAdjust; + NvU32 format; + NvU64 length NV_ALIGN_BYTES(8); + NvU32 pageCount; + struct pte_desc pteDesc; +} rpc_alloc_memory_v13_01; + +#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest. 
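/*
 * Illustrative sketch only (not part of the patch): the "hi:lo" pairs in
 * these RM headers, e.g. NVOS02_FLAGS_LOCATION 11:8, name bit ranges that
 * RM and nouveau normally consume through DRF-style field macros.  The
 * hand-expanded shifts below are an assumption of this sketch, not driver
 * code; they simply show how such a flags word is composed.  The RM headers
 * spell the type NvU32; plain uint32_t is used here so the sketch stands
 * alone.
 */
#include <stdint.h>

static inline uint32_t nvos02_flags_sketch(void)
{
	uint32_t flags = 0;

	flags |= 0x00000000u << 4;  /* NVOS02_FLAGS_PHYSICALITY 7:4   = CONTIGUOUS    */
	flags |= 0x00000002u << 8;  /* NVOS02_FLAGS_LOCATION    11:8  = VIDMEM        */
	flags |= 0x00000002u << 12; /* NVOS02_FLAGS_COHERENCY   15:12 = WRITE_COMBINE */
	flags |= 0x00000001u << 30; /* NVOS02_FLAGS_MAPPING     31:30 = NO_MAP        */

	return flags;
}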
+ +#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS { + NvU32 fbsrType; + NvU32 numRegions; + NvHandle hClient; + NvHandle hSysMem; + NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8); + NvBool bEnteringGcoffState; +} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS { + NvU32 fbsrType; + NvHandle hClient; + NvHandle hVidMem; + NV_DECLARE_ALIGNED(NvU64 vidOffset, 8); + NV_DECLARE_ALIGNED(NvU64 sysOffset, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h new file mode 100644 index 000000000000..325fdd8b6090 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_FIFO_H__ +#define __NVRM_FIFO_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32 + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16 + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2 + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16 + +typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY { + NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES]; + NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; + NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; + NvU32 numPbdmas; + char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN]; +} NV2080_CTRL_FIFO_DEVICE_ENTRY; + +#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS { + NvU32 baseIndex; + NvU32 numEntries; + NvBool bMore; + // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; + NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; +} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS; + +typedef enum +{ + /* ************************************************************************* + * Bug 3820969 + * THINK BEFORE CHANGING ENUM ORDER HERE. + * VGPU-guest uses this same ordering. Because this enum is not versioned, + * changing the order here WILL BREAK old-guest-on-newer-host compatibility. 
+ * ************************************************************************/ + + // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc., + ENGINE_INFO_TYPE_ENG_DESC = 0, + + // HW engine ID + ENGINE_INFO_TYPE_FIFO_TAG, + + // RM_ENGINE_TYPE_* + ENGINE_INFO_TYPE_RM_ENGINE_TYPE, + + // + // runlist id (meaning varies by GPU) + // Valid only for Esched-driven engines + // + ENGINE_INFO_TYPE_RUNLIST, + + // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_* + ENGINE_INFO_TYPE_MMU_FAULT_ID, + + // ROBUST_CHANNEL_* + ENGINE_INFO_TYPE_RC_MASK, + + // Reset Bit Position. On Ampere, only valid if not _INVALID + ENGINE_INFO_TYPE_RESET, + + // Interrupt Bit Position + ENGINE_INFO_TYPE_INTR, + + // log2(MC_ENGINE_*) + ENGINE_INFO_TYPE_MC, + + // The DEV_TYPE_ENUM for this engine + ENGINE_INFO_TYPE_DEV_TYPE_ENUM, + + // The particular instance of this engine type + ENGINE_INFO_TYPE_INSTANCE_ID, + + // + // The base address for this engine's NV_RUNLIST. Valid only on Ampere+ + // Valid only for Esched-driven engines + // + ENGINE_INFO_TYPE_RUNLIST_PRI_BASE, + + // + // If this entry is a host-driven engine. + // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry. + // + ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE, + + // + // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+ + // Valid only for Esched-driven engines + // + ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID, + + // + // The base address for this engine's NV_CHRAM registers. Valid only on + // Ampere+ + // + // Valid only for Esched-driven engines + // + ENGINE_INFO_TYPE_CHRAM_PRI_BASE, + + // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM + ENGINE_INFO_TYPE_KERNEL_RM_MAX, + // Used for iterating the engine info table by the index passed. + ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX, + + // Size of FIFO_ENGINE_LIST.engineData + ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID, + + // Input-only parameter for kfifoEngineInfoXlate. + ENGINE_INFO_TYPE_PBDMA_ID + + /* ************************************************************************* + * Bug 3820969 + * THINK BEFORE CHANGING ENUM ORDER HERE. + * VGPU-guest uses this same ordering. Because this enum is not versioned, + * changing the order here WILL BREAK old-guest-on-newer-host compatibility. 
+ * ************************************************************************/ +} ENGINE_INFO_TYPE; + +#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS { + NvU32 size; +} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40 + +typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO; + +#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +#define NV_MAX_SUBDEVICES 8 + +typedef struct NV_MEMORY_DESC_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV_MEMORY_DESC_PARAMS; + +#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U + +#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U + +typedef struct NV_CHANNEL_ALLOC_PARAMS { + + NvHandle hObjectError; // error context DMA + NvHandle hObjectBuffer; // no longer used + NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO + NvU32 gpFifoEntries; // number of GP FIFO entries + + NvU32 flags; + + + NvHandle hContextShare; // context share handle + NvHandle hVASpace; // VASpace for the channel + + // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 + NvHandle hUserdMemory[NV_MAX_SUBDEVICES]; + + // offset to beginning of UserD within hUserdMemory[x] + NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8); + + // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated + NvU32 engineType; + // Channel identifier that is unique for the duration of a RM session + NvU32 cid; + // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods + NvU32 subDeviceId; + NvHandle hObjectEccError; // ECC error context DMA + + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8); + + NvHandle hPhysChannelGroup; // reserved + NvU32 internalFlags; // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved + NvU32 ProcessID; // reserved + NvU32 SubProcessID; // reserved + + // IV used for CPU-side encryption / GPU-side decryption. + NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // IV used for CPU-side decryption / GPU-side encryption. + NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // Nonce used CPU-side signing / GPU-side signature verification. 
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved +} NV_CHANNEL_ALLOC_PARAMS; + +typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; + +#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 +#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE +#define NVOS04_FLAGS_VPR 2:2 +#define NVOS04_FLAGS_VPR_FALSE 0x00000000 +#define NVOS04_FLAGS_VPR_TRUE 0x00000001 +#define NVOS04_FLAGS_CC_SECURE 2:2 +#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000 +#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 +#define NVOS04_FLAGS_MAP_CHANNEL 30:30 +#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 +#define 
NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001 + +typedef enum { + /*! + * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by + * kernel CPU-RM clients. + */ + ERROR_NOTIFIER_TYPE_UNKNOWN = 0, + /*! @brief Error notifier is explicitly not set. + * + * The corresponding hErrorContext or hEccErrorContext must be + * NV01_NULL_OBJECT. + */ + ERROR_NOTIFIER_TYPE_NONE, + /*! @brief Error notifier is a ContextDma */ + ERROR_NOTIFIER_TYPE_CTXDMA, + /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */ + ERROR_NOTIFIER_TYPE_MEMORY +} ErrorNotifierType; + +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY + +#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */ +typedef struct NVA06F_CTRL_BIND_PARAMS { + NvU32 engineType; +} NVA06F_CTRL_BIND_PARAMS; + +#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */ +typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS { + NvBool bEnable; + NvBool bSkipSubmit; +} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U + +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 physAttr; + NvU16 bufferId; + NvU8 bInitialize; + NvU8 bNonmapped; +} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY; + +#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS { + NvU32 engineType; + NvHandle hClient; + NvU32 ChID; + NvHandle hChanClient; + NvHandle hObject; + NvHandle hVirtMemory; + NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 entryCount; + // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8); +} 
NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS; + +typedef struct rpc_rc_triggered_v17_02 +{ + NvU32 nv2080EngineType; + NvU32 chid; + NvU32 exceptType; + NvU32 scope; + NvU16 partitionAttributionId; +} rpc_rc_triggered_v17_02; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h new file mode 100644 index 000000000000..82c5ec727bb4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_GR_H__ +#define __NVRM_GR_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19 + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) +#define 
NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019) + +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U + +#include "fifo.h" +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h new file mode 100644 index 000000000000..085a7dac0405 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h @@ -0,0 +1,817 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_GSP_H__ +#define __NVRM_GSP_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NV_DECLARE_ALIGNED(NvU64 reserved, 8); + NvU32 performance; + NvBool supportCompressed; + NvBool supportISO; + NvBool bProtected; + NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; +} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + NvU32 numFBRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; + +#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23 + +#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL) + +typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + NvU32 index; + NvU32 flags; + NvU32 length; + NvU8 data[NV2080_GPU_MAX_GID_LENGTH]; +} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS; + +typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS { + NvU32 gpcMask; +} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS; + +typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS { + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS; + +typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS { + NvU32 gpcId; + NvU32 zcullMask; +} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS; + +typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { + NvU32 BoardID; + char chipSKU[4]; + char chipSKUMod[2]; + char project[5]; + char projectSKU[5]; + char CDP[6]; + char projectSKUMod[2]; + NvU32 businessCycle; +} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; + +typedef enum +{ + COMPUTE_BRANDING_TYPE_NONE, + COMPUTE_BRANDING_TYPE_TESLA, +} COMPUTE_BRANDING_TYPE; + +#define MAX_GPC_COUNT 32 + +typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { + NvU32 totalVFs; + NvU32 firstVfOffset; + NvU32 vfFeatureMask; + NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8); + NV_DECLARE_ALIGNED(NvU64 bar0Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar1Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar2Size, 8); + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; + NvBool bSriovEnabled; + NvBool bSriovHeavyEnabled; + NvBool bEmulateVFBar0TlbInvalidationRegister; + NvBool bClientRmAllocatedCtxBuffer; +} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; + +#include "engine.h" + +#define NVGPU_ENGINE_CAPS_MASK_BITS 32 + +#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1) + +typedef struct GspSMInfo_t +{ + NvU32 version; + NvU32 regBankCount; + NvU32 regBankRegCount; + NvU32 maxWarpsPerSM; + NvU32 maxThreadsPerWarp; + NvU32 geomGsObufEntries; + NvU32 geomXbufEntries; + NvU32 maxSPPerSM; + NvU32 rtCoreCount; +} GspSMInfo; + +typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS { + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3, + 
NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8, +} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS; + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) + +typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS +{ + NvU32 numHeads; + NvU32 maxNumHeads; +} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; + +typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS +{ + NvU32 headIndex; + NvU32 maxHResolution; + NvU32 maxVResolution; +} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; + +typedef struct GspStaticConfigInfo_t +{ + NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo; + NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo; + NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT]; + NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT]; + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo; + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams; + COMPUTE_BRANDING_TYPE computeBranding; + + NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps; + NvU32 sriovMaxGfid; + + NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX]; + + GspSMInfo SM_info; + + NvBool poisonFuseEnabled; + + NvU64 fb_length; + NvU32 fbio_mask; + NvU32 fb_bus_width; + NvU32 fb_ram_type; + NvU32 fbp_mask; + NvU32 l2_cache_size; + + NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; + NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; + + NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvBool bGpuInternalSku; + NvBool bIsQuadroGeneric; + NvBool bIsQuadroAd; + NvBool bIsNvidiaNvs; + NvBool bIsVgx; + NvBool bGeforceSmb; + NvBool bIsTitan; + NvBool bIsTesla; + NvBool bIsMobile; + NvBool bIsGc6Rtd3Allowed; + NvBool bIsGcOffRtd3Allowed; + NvBool bIsGcoffLegacyAllowed; + + NvU64 bar1PdeBase; + NvU64 bar2PdeBase; + + NvBool bVbiosValid; + NvU32 vbiosSubVendor; + NvU32 vbiosSubDevice; + + NvBool bPageRetirementSupported; + + NvBool bSplitVasBetweenServerClientRm; + + NvBool bClRootportNeedsNosnoopWAR; + + VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads; + VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution; + NvU64 displaylessMaxPixels; + + // Client handle for internal RMAPI control. + NvHandle hInternalClient; + + // Device handle for internal RMAPI control. + NvHandle hInternalDevice; + + // Subdevice handle for internal RMAPI control. 
+ NvHandle hInternalSubdevice; + + NvBool bSelfHostedMode; + NvBool bAtsSupported; + + NvBool bIsGpuUefi; +} GspStaticConfigInfo; + +typedef struct rpc_unloading_guest_driver_v1F_07 +{ + NvBool bInPMTransition; + NvBool bGc6Entering; + NvU32 newLevel; +} rpc_unloading_guest_driver_v1F_07; + +typedef struct PACKED_REGISTRY_ENTRY +{ + NvU32 nameOffset; + NvU8 type; + NvU32 data; + NvU32 length; +} PACKED_REGISTRY_ENTRY; + +typedef struct PACKED_REGISTRY_TABLE +{ + NvU32 size; + NvU32 numEntries; + PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries); +} PACKED_REGISTRY_TABLE; + +typedef struct +{ + NvU16 deviceID; // deviceID + NvU16 vendorID; // vendorID + NvU16 subdeviceID; // subsystem deviceID + NvU16 subvendorID; // subsystem vendorID + NvU8 revisionID; // revision ID +} BUSINFO; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct DOD_METHOD_DATA +{ + NV_STATUS status; + NvU32 acpiIdListLen; + NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} DOD_METHOD_DATA; + +typedef struct JT_METHOD_DATA +{ + NV_STATUS status; + NvU32 jtCaps; + NvU16 jtRevId; + NvBool bSBIOSCaps; +} JT_METHOD_DATA; + +typedef struct MUX_METHOD_DATA_ELEMENT +{ + NvU32 acpiId; + NvU32 mode; + NV_STATUS status; +} MUX_METHOD_DATA_ELEMENT; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct MUX_METHOD_DATA +{ + NvU32 tableLen; + MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} MUX_METHOD_DATA; + +typedef struct CAPS_METHOD_DATA +{ + NV_STATUS status; + NvU32 optimusCaps; +} CAPS_METHOD_DATA; + +typedef struct ACPI_METHOD_DATA +{ + NvBool bValid; + DOD_METHOD_DATA dodMethodData; + JT_METHOD_DATA jtMethodData; + MUX_METHOD_DATA muxMethodData; + CAPS_METHOD_DATA capsMethodData; +} ACPI_METHOD_DATA; + +typedef struct GSP_VF_INFO +{ + NvU32 totalVFs; + NvU32 firstVFOffset; + NvU64 FirstVFBar0Address; + NvU64 FirstVFBar1Address; + NvU64 FirstVFBar2Address; + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; +} GSP_VF_INFO; + +typedef struct GspSystemInfo +{ + NvU64 gpuPhysAddr; + NvU64 gpuPhysFbAddr; + NvU64 gpuPhysInstAddr; + NvU64 nvDomainBusDeviceFunc; + NvU64 simAccessBufPhysAddr; + NvU64 pcieAtomicsOpMask; + NvU64 consoleMemSize; + NvU64 maxUserVa; + NvU32 pciConfigMirrorBase; + NvU32 pciConfigMirrorSize; + NvU8 oorArch; + NvU64 clPdbProperties; + NvU32 Chipset; + NvBool bGpuBehindBridge; + NvBool bMnocAvailable; + NvBool bUpstreamL0sUnsupported; + NvBool bUpstreamL1Unsupported; + NvBool bUpstreamL1PorSupported; + NvBool bUpstreamL1PorMobileOnly; + NvU8 upstreamAddressValid; + BUSINFO FHBBusInfo; + BUSINFO chipsetIDInfo; + ACPI_METHOD_DATA acpiMethodData; + NvU32 hypervisorType; + NvBool bIsPassthru; + NvU64 sysTimerOffsetNs; + GSP_VF_INFO gspVFInfo; +} GspSystemInfo; + +typedef struct rpc_os_error_log_v17_00 +{ + NvU32 exceptType; + NvU32 runlistId; + NvU32 chid; + char errString[0x100]; +} rpc_os_error_log_v17_00; + +typedef struct rpc_run_cpu_sequencer_v17_00 +{ + NvU32 bufferSizeDWord; + NvU32 cmdIndex; + NvU32 regSaveArea[8]; + NvU32 commandBuffer[]; +} rpc_run_cpu_sequencer_v17_00; + +typedef enum GSP_SEQ_BUF_OPCODE +{ + GSP_SEQ_BUF_OPCODE_REG_WRITE = 0, + GSP_SEQ_BUF_OPCODE_REG_MODIFY, + GSP_SEQ_BUF_OPCODE_REG_POLL, + GSP_SEQ_BUF_OPCODE_DELAY_US, + GSP_SEQ_BUF_OPCODE_REG_STORE, + GSP_SEQ_BUF_OPCODE_CORE_RESET, + GSP_SEQ_BUF_OPCODE_CORE_START, + GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT, + 
GSP_SEQ_BUF_OPCODE_CORE_RESUME, +} GSP_SEQ_BUF_OPCODE; + +typedef struct +{ + NvU32 addr; + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_REG_WRITE; + +typedef struct +{ + NvU32 addr; + NvU32 mask; + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY; + +typedef struct +{ + NvU32 addr; + NvU32 mask; + NvU32 val; + NvU32 timeout; + NvU32 error; +} GSP_SEQ_BUF_PAYLOAD_REG_POLL; + +typedef struct +{ + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_DELAY_US; + +typedef struct +{ + NvU32 addr; + NvU32 index; +} GSP_SEQ_BUF_PAYLOAD_REG_STORE; + +typedef struct GSP_SEQUENCER_BUFFER_CMD +{ + GSP_SEQ_BUF_OPCODE opCode; + union + { + GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite; + GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify; + GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll; + GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs; + GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore; + } payload; +} GSP_SEQUENCER_BUFFER_CMD; + +typedef struct +{ + // Magic + // BL to use for verification (i.e. Booter locked it in WPR2) + NvU64 magic; // = 0xdc3aae21371a60b3; + + // Revision number of Booter-BL-Sequencer handoff interface + // Bumped up when we change this interface so it is not backward compatible. + // Bumped up when we revoke GSP-RM ucode + NvU64 revision; // = 1; + + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + + NvU64 sysmemAddrOfRadix3Elf; + NvU64 sizeOfRadix3Elf; + + NvU64 sysmemAddrOfBootloader; + NvU64 sizeOfBootloader; + + // Offsets inside bootloader image needed by Booter + NvU64 bootloaderCodeOffset; + NvU64 bootloaderDataOffset; + NvU64 bootloaderManifestOffset; + + union + { + // Used only at initial boot + struct + { + NvU64 sysmemAddrOfSignature; + NvU64 sizeOfSignature; + }; + + // + // Used at suspend/resume to read GspFwHeapFreeList + // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart) + // + struct + { + NvU32 gspFwHeapFreeListWprOffset; + NvU32 unused0; + NvU64 unused1; + }; + }; + + // ---- Members describing FB layout -------------------------------- + NvU64 gspFwRsvdStart; + + NvU64 nonWprHeapOffset; + NvU64 nonWprHeapSize; + + NvU64 gspFwWprStart; + + // GSP-RM to use to setup heap. + NvU64 gspFwHeapOffset; + NvU64 gspFwHeapSize; + + // BL to use to find ELF for jump + NvU64 gspFwOffset; + // Size is sizeOfRadix3Elf above. + + NvU64 bootBinOffset; + // Size is sizeOfBootloader above. + + NvU64 frtsOffset; + NvU64 frtsSize; + + NvU64 gspFwWprEnd; + + // GSP-RM to use for fbRegionInfo? + NvU64 fbSize; + + // ---- Other members ----------------------------------------------- + + // GSP-RM to use for fbRegionInfo? + NvU64 vgaWorkspaceOffset; + NvU64 vgaWorkspaceSize; + + // Boot count. Used to determine whether to load the firmware image. + NvU64 bootCount; + + // This union is organized the way it is to start at an 8-byte boundary and achieve natural + // packing of the internal struct fields. + union + { + struct + { + // TODO: the partitionRpc* fields below do not really belong in this + // structure. The values are patched in by the partition bootstrapper + // when GSP-RM is booted in a partition, and this structure was a + // convenient place for the bootstrapper to access them. These should + // be moved to a different comm. mechanism between the bootstrapper + // and the GSP-RM tasks. + + // Shared partition RPC memory (physical address) + NvU64 partitionRpcAddr; + + // Offsets relative to partitionRpcAddr + NvU16 partitionRpcRequestOffset; + NvU16 partitionRpcReplyOffset; + + // Code section and dataSection offset and size. 
+ NvU32 elfCodeOffset; + NvU32 elfDataOffset; + NvU32 elfCodeSize; + NvU32 elfDataSize; + + // Used during GSP-RM resume to check for revocation + NvU32 lsUcodeVersion; + }; + + struct + { + // Pad for the partitionRpc* fields, plus 4 bytes + NvU32 partitionRpcPadding[4]; + + // CrashCat (contiguous) buffer size/location - occupies same bytes as the + // elf(Code|Data)(Offset|Size) fields above. + // TODO: move to GSP_FMC_INIT_PARAMS + NvU64 sysmemAddrOfCrashReportQueue; + NvU32 sizeOfCrashReportQueue; + + // Pad for the lsUcodeVersion field + NvU32 lsUcodeVersionPadding[1]; + }; + }; + + // Number of VF partitions allocating sub-heaps from the WPR heap + // Used during boot to ensure the heap is adequately sized + NvU8 gspFwHeapVfPartitionCount; + + // Pad structure to exactly 256 bytes. Can replace padding with additional + // fields without incrementing revision. Padding initialized to 0. + NvU8 padding[7]; + + // BL to use for verification (i.e. Booter says OK to boot) + NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified +} GspFwWprMeta; + +#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL + +#define GSP_FW_WPR_META_REVISION 1 + +typedef struct +{ + NvU32 version; // queue version + NvU32 size; // bytes, page aligned + NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum + NvU32 msgCount; // number of entries in queue + NvU32 writePtr; // message id of next slot + NvU32 flags; // if set it means "i want to swap RX" + NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store. + NvU32 entryOff; // Offset of entries from start of backing store. +} msgqTxHeader; + +typedef struct +{ + NvU32 readPtr; // message id of last message read +} msgqRxHeader; + +typedef struct { + RmPhysAddr sharedMemPhysAddr; + NvU32 pageTableEntryCount; + NvLength cmdQueueOffset; + NvLength statQueueOffset; + NvLength locklessCmdQueueOffset; + NvLength locklessStatQueueOffset; +} MESSAGE_QUEUE_INIT_ARGUMENTS; + +typedef struct { + NvU32 oldLevel; + NvU32 flags; + NvBool bInPMTransition; +} GSP_SR_INIT_ARGUMENTS; + +typedef struct +{ + MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments; + GSP_SR_INIT_ARGUMENTS srInitArguments; + NvU32 gpuInstance; + + struct + { + NvU64 pa; + NvU64 size; + } profilerArgs; +} GSP_ARGUMENTS_CACHED; + +#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U) + +#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U) + +typedef NvU64 LibosAddress; + +typedef struct +{ + LibosAddress id8; // Id tag. + LibosAddress pa; // Physical address. + LibosAddress size; // Size of memory area. + NvU8 kind; // See LibosMemoryRegionKind above. + NvU8 loc; // See LibosMemoryRegionLoc above. +} LibosMemoryRegionInitArgument; + +typedef enum { + LIBOS_MEMORY_REGION_NONE, + LIBOS_MEMORY_REGION_CONTIGUOUS, + LIBOS_MEMORY_REGION_RADIX3 +} LibosMemoryRegionKind; + +typedef enum { + LIBOS_MEMORY_REGION_LOC_NONE, + LIBOS_MEMORY_REGION_LOC_SYSMEM, + LIBOS_MEMORY_REGION_LOC_FB +} LibosMemoryRegionLoc; + +typedef struct +{ + // + // Magic + // Use for verification by Booter + // + NvU64 magic; // = GSP_FW_SR_META_MAGIC; + + // + // Revision number + // Bumped up when we change this interface so it is not backward compatible. 
+ // Bumped up when we revoke GSP-RM ucode + // + NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION; + + // + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + // + NvU64 sysmemAddrOfSuspendResumeData; + NvU64 sizeOfSuspendResumeData; + + // ---- Members for crypto ops across S/R --------------------------- + + // + // HMAC over the entire GspFwSRMeta structure (including padding) + // with the hmac field itself zeroed. + // + NvU8 hmac[32]; + + // Hash over GspFwWprMeta structure + NvU8 wprMetaHash[32]; + + // Hash over GspFwHeapFreeList structure. All zeros signifies no free list. + NvU8 heapFreeListHash[32]; + + // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details) + NvU8 dataHash[32]; + + // + // Pad structure to exactly 256 bytes (1 DMA chunk). + // Padding initialized to zero. + // + NvU32 padding[24]; + +} GspFwSRMeta; + +#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL + +#define GSP_FW_SR_META_REVISION 2 + +#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \ + ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \ + /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_START */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \ + 0) + +typedef struct { + // + // Version 1 + // Version 2 + // Version 3 = for Partition boot + // Version 4 = for eb riscv boot + // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later. + // + NvU32 version; // structure version + NvU32 bootloaderOffset; + NvU32 bootloaderSize; + NvU32 bootloaderParamOffset; + NvU32 bootloaderParamSize; + NvU32 riscvElfOffset; + NvU32 riscvElfSize; + NvU32 appVersion; // Changelist number associated with the image + // + // Manifest contains information about Monitor and it is + // input to BR + // + NvU32 manifestOffset; + NvU32 manifestSize; + // + // Monitor Data offset within RISCV image and size + // + NvU32 monitorDataOffset; + NvU32 monitorDataSize; + // + // Monitor Code offset withtin RISCV image and size + // + NvU32 monitorCodeOffset; + NvU32 monitorCodeSize; + NvU32 bIsMonitorEnabled; + // + // Swbrom Code offset within RISCV image and size + // + NvU32 swbromCodeOffset; + NvU32 swbromCodeSize; + // + // Swbrom Data offset within RISCV image and size + // + NvU32 swbromDataOffset; + NvU32 swbromDataSize; + // + // Total size of FB carveout (image and reserved space). + // + NvU32 fbReservedSize; + // + // Indicates whether the entire RISC-V image is signed as "code" in code section. 
+ // + NvU32 bSignedAsCode; +} RM_RISCV_UCODE_DESC; + +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY { + NvU16 engineIdx; + NvU32 pmcIntrMask; + NvU32 vectorStall; + NvU32 vectorNonStall; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY; + +typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP { + NvU8 subtreeStart; + NvU8 subtreeEnd; +} NV2080_INTR_CATEGORY_SUBTREE_MAP; + +#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128 + +typedef enum NV2080_INTR_CATEGORY { + NV2080_INTR_CATEGORY_DEFAULT = 0, + NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1, + NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2, + NV2080_INTR_CATEGORY_RUNLIST = 3, + NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4, + NV2080_INTR_CATEGORY_UVM_OWNED = 5, + NV2080_INTR_CATEGORY_UVM_SHARED = 6, + NV2080_INTR_CATEGORY_ENUM_COUNT = 7, +} NV2080_INTR_CATEGORY; + +#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS { + NvU32 tableLen; + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE]; + NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT]; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS; + +#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures + +#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels + +typedef union rpc_message_rpc_union_field_v03_00 +{ + NvU32 spare; + NvU32 cpuRmGfid; +} rpc_message_rpc_union_field_v03_00; + +typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v; + +typedef struct rpc_message_header_v03_00 +{ + NvU32 header_version; + NvU32 signature; + NvU32 length; + NvU32 function; + NvU32 rpc_result; + NvU32 rpc_result_private; + NvU32 sequence; + rpc_message_rpc_union_field_v u; + rpc_generic_union rpc_message_data[]; +} rpc_message_header_v03_00; + +typedef rpc_message_header_v03_00 rpc_message_header_v; + +typedef struct GSP_MSG_QUEUE_ELEMENT +{ + NvU8 authTagBuffer[16]; // Authentication tag buffer. + NvU8 aadBuffer[16]; // AAD buffer. + NvU32 checkSum; // Set to value needed to make checksum always zero. + NvU32 seqNum; // Sequence number maintained by the message queue. + NvU32 elemCount; // Number of message queue elements this message has. + NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8); +} GSP_MSG_QUEUE_ELEMENT; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h new file mode 100644 index 000000000000..642c13aec325 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_MSGFN_H__ +#define __NVRM_MSGFN_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#ifndef E +# define E(RPC) NV_VGPU_MSG_EVENT_##RPC, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT = 0x1000) // 0x1000 + E(GSP_INIT_DONE) // 0x1001 + E(GSP_RUN_CPU_SEQUENCER) // 0x1002 + E(POST_EVENT) // 0x1003 + E(RC_TRIGGERED) // 0x1004 + E(MMU_FAULT_QUEUED) // 0x1005 + E(OS_ERROR_LOG) // 0x1006 + E(RG_LINE_INTR) // 0x1007 + E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008 + E(SIM_READ) // 0x1009 + E(SIM_WRITE) // 0x100a + E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b + E(UCODE_LIBOS_PRINT) // 0x100c + E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e + E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f + E(VGPU_CONFIG) // 0x1010 + E(DISPLAY_MODESET) // 0x1011 + E(EXTDEV_INTR_SERVICE) // 0x1012 + E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013 + E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014 + E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015 + E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016 + E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017 + E(TIMED_SEMAPHORE_RELEASE) // 0x1018 + E(NVLINK_IS_GPU_DEGRADED) // 0x1019 + E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a + E(GSP_SEND_USER_SHARED_DATA) // 0x101b + E(NVLINK_FAULT_UP) // 0x101c + E(GSP_LOCKDOWN_NOTICE) // 0x101d + E(MIG_CI_CONFIG_UPDATE) // 0x101e + E(NUM_EVENTS) // END +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h new file mode 100644 index 000000000000..3a04e702677f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_NVDEC_H__ +#define __NVRM_NVDEC_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; + NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2 +} NV_BSP_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h new file mode 100644 index 000000000000..203c1d5304d9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_NVENC_H__ +#define __NVRM_NVENC_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC? + NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2 +} NV_MSENC_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h new file mode 100644 index 000000000000..71fc53889ec7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_NVJPG_H__ +#define __NVRM_NVJPG_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG? + NvU32 engineInstance; +} NV_NVJPG_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h new file mode 100644 index 000000000000..49d81c7673d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_OFA_H__ +#define __NVRM_OFA_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA? +} NV_OFA_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h new file mode 100644 index 000000000000..2a037acc6b1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_RPCFN_H__ +#define __NVRM_RPCFN_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#ifndef X +# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP) // 0 + X(RM, SET_GUEST_SYSTEM_INFO) // 1 + X(RM, ALLOC_ROOT) // 2 + X(RM, ALLOC_DEVICE) // 3 deprecated + X(RM, ALLOC_MEMORY) // 4 + X(RM, ALLOC_CTX_DMA) // 5 + X(RM, ALLOC_CHANNEL_DMA) // 6 + X(RM, MAP_MEMORY) // 7 + X(RM, BIND_CTX_DMA) // 8 deprecated + X(RM, ALLOC_OBJECT) // 9 + X(RM, FREE) //10 + X(RM, LOG) //11 + X(RM, ALLOC_VIDMEM) //12 + X(RM, UNMAP_MEMORY) //13 + X(RM, MAP_MEMORY_DMA) //14 + X(RM, UNMAP_MEMORY_DMA) //15 + X(RM, GET_EDID) //16 + X(RM, ALLOC_DISP_CHANNEL) //17 + X(RM, ALLOC_DISP_OBJECT) //18 + X(RM, ALLOC_SUBDEVICE) //19 + X(RM, ALLOC_DYNAMIC_MEMORY) //20 + X(RM, DUP_OBJECT) //21 + X(RM, IDLE_CHANNELS) //22 + X(RM, ALLOC_EVENT) //23 + X(RM, SEND_EVENT) //24 + X(RM, REMAPPER_CONTROL) //25 deprecated + X(RM, DMA_CONTROL) //26 + X(RM, DMA_FILL_PTE_MEM) //27 + X(RM, MANAGE_HW_RESOURCE) //28 + X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated + X(RM, CREATE_FB_SEGMENT) //30 + X(RM, DESTROY_FB_SEGMENT) //31 + X(RM, ALLOC_SHARE_DEVICE) //32 + X(RM, DEFERRED_API_CONTROL) //33 + X(RM, REMOVE_DEFERRED_API) //34 + X(RM, SIM_ESCAPE_READ) //35 + X(RM, SIM_ESCAPE_WRITE) //36 + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37 + X(RM, FREE_VIDMEM_VIRT) //38 + X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP + X(RM, PERF_GET_PERFMON_SAMPLE) //40 + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated + X(RM, PERF_GET_LEVEL_INFO) //42 + X(RM, MAP_SEMA_MEMORY) //43 + X(RM, UNMAP_SEMA_MEMORY) //44 + X(RM, SET_SURFACE_PROPERTIES) //45 + X(RM, CLEANUP_SURFACE) //46 + X(RM, UNLOADING_GUEST_DRIVER) //47 + X(RM, TDR_SET_TIMEOUT_STATE) //48 + X(RM, SWITCH_TO_VGA) //49 + X(RM, GPU_EXEC_REG_OPS) //50 + X(RM, GET_STATIC_INFO) //51 + X(RM, ALLOC_VIRTMEM) //52 + X(RM, UPDATE_PDE_2) //53 + X(RM, SET_PAGE_DIRECTORY) //54 + X(RM, GET_STATIC_PSTATE_INFO) //55 + X(RM, 
TRANSLATE_GUEST_GPU_PTES) //56 + X(RM, RESERVED_57) //57 + X(RM, RESET_CURRENT_GR_CONTEXT) //58 + X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59 + X(RM, GET_ENGINE_UTILIZATION) //60 + X(RM, UPDATE_GPU_PDES) //61 + X(RM, GET_ENCODER_CAPACITY) //62 + X(RM, VGPU_PF_REG_READ32) //63 + X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64 + X(GSP, GET_GSP_STATIC_INFO) //65 + X(RM, RMFS_INIT) //66 + X(RM, RMFS_CLOSE_QUEUE) //67 + X(RM, RMFS_CLEANUP) //68 + X(RM, RMFS_TEST) //69 + X(RM, UPDATE_BAR_PDE) //70 + X(RM, CONTINUATION_RECORD) //71 + X(RM, GSP_SET_SYSTEM_INFO) //72 + X(RM, SET_REGISTRY) //73 + X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated + X(GSP, GSP_RM_CONTROL) //76 + X(RM, GET_STATIC_INFO2) //77 + X(RM, DUMP_PROTOBUF_COMPONENT) //78 + X(RM, UNSET_PAGE_DIRECTORY) //79 + X(RM, GET_CONSOLIDATED_STATIC_INFO) //80 + X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE) //85 + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86 + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87 + X(RM, CTRL_RESET_CHANNEL) //88 + X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89 + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90 + X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91 + X(RM, CTRL_PERF_BOOST) //92 + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94 + X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95 + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96 + X(RM, CTRL_GPFIFO_SCHEDULE) //97 + X(RM, CTRL_SET_TIMESLICE) //98 + X(RM, CTRL_PREEMPT) //99 + X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100 + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101 + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102 + X(GSP, GSP_RM_ALLOC) //103 + X(RM, CTRL_GET_P2P_CAPS_V2) //104 + X(RM, CTRL_CIPHER_AES_ENCRYPT) //105 + X(RM, CTRL_CIPHER_SESSION_KEY) //106 + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107 + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108 + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109 + X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110 + X(RM, CTRL_GPU_PROMOTE_CTX) //111 + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112 + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113 + X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114 + X(RM, CTRL_GPU_INITIALIZE_CTX) //115 + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116 + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117 + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118 + X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119 + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120 + X(RM, CTRL_GET_CE_PCE_MASK) //121 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122 + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123 + X(RM, CTRL_GET_NVLINK_STATUS) //124 + X(RM, CTRL_GET_P2P_CAPS) //125 + X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126 + X(RM, RESERVED_0) //127 + X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128 + X(RM, CTRL_RESERVE_HWPM_LEGACY) //129 + X(RM, CTRL_B0CC_EXEC_REG_OPS) //130 + X(RM, CTRL_BIND_PM_RESOURCES) //131 + X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132 + X(RM, CTRL_DBG_RESUME_CONTEXT) //133 + X(RM, CTRL_DBG_EXEC_REG_OPS) //134 + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135 + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136 + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137 + X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138 + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139 + X(RM, CTRL_ALLOC_PMA_STREAM) //140 + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141 + X(RM, CTRL_FB_GET_INFO_V2) //142 + X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143 + X(RM, 
CTRL_GR_GET_CTX_BUFFER_INFO) //144 + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145 + X(RM, CTRL_GPU_EVICT_CTX) //146 + X(RM, CTRL_FB_GET_FS_INFO) //147 + X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148 + X(RM, CTRL_STOP_CHANNEL) //149 + X(RM, CTRL_GR_PC_SAMPLING_MODE) //150 + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151 + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152 + X(RM, CTRL_FREE_PMA_STREAM) //153 + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154 + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155 + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156 + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157 + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158 + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159 + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160 + X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161 + X(UVM, UVM_PAGING_CHANNEL_MAP) //162 + X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163 + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164 + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165 + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166 + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167 + X(RM, DCE_RM_INIT) //168 + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169 + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170 + X(RM, GET_PLCABLE_ADDRESS_KIND) //171 + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172 + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173 + X(RM, CTRL_GET_MMU_DEBUG_MODE) //174 + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177 + X(RM, DISABLE_CHANNELS) //178 + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179 + X(RM, CTRL_FABRIC_MEM_STATS) //180 + X(RM, SAVE_HIBERNATION_DATA) //181 + X(RM, RESTORE_HIBERNATION_DATA) //182 + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183 + X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184 + X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185 + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186 + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187 + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188 + X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189 + X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190 + X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS) // 191 + X(RM, CTRL_BUS_SET_P2P_MAPPING) // 192 + X(RM, CTRL_BUS_UNSET_P2P_MAPPING) // 193 + X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK) // 194 + X(RM, CTRL_GPU_MIGRATABLE_OPS) // 195 + X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196 + X(RM, CTRL_GET_HS_CREDITS) // 197 + X(RM, CTRL_SET_HS_CREDITS) // 198 + X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199 + X(RM, INVALIDATE_TLB) // 200 + X(RM, RESERVED_201) // 201 + X(RM, ECC_NOTIFIER_WRITE_ACK) // 202 + X(RM, NUM_FUNCTIONS) //END +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h new file mode 100644 index 000000000000..f58edf62e4ae --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_VMM_H__ +#define __NVRM_VMM_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */ + +#define FERMI_VASPACE_A (0x000090f1) + +typedef struct +{ + NvU32 index; + NvV32 flags; + NvU64 vaSize NV_ALIGN_BYTES(8); + NvU64 vaStartInternal NV_ALIGN_BYTES(8); + NvU64 vaLimitInternal NV_ALIGN_BYTES(8); + NvU32 bigPageSize; + NvU64 vaBase NV_ALIGN_BYTES(8); +} NV_VASPACE_ALLOCATION_PARAMETERS; + +#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 // #include -#include -#include +#include "nvrm/ofa.h" struct r535_ofa_obj { struct nvkm_object object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c index ffb4104a7d8c..5acb98d137bd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -21,8 +21,7 @@ */ #include -#include -#include +#include "nvrm/rpcfn.h" #define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE #define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c index 94cad290e17e..c697885c65d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -21,10 +21,7 @@ */ #include -#include -#include -#include -#include +#include "nvrm/vmm.h" static int r535_mmu_promote_vmm(struct nvkm_vmm *vmm) -- cgit v1.2.3 From befe75ae0db90dcbe94fe2670d94b8b7a4855666 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sat, 15 Feb 2025 02:55:45 +1000 Subject: drm/nouveau/gsp: add gpu hal stubs With GSP-RM handling the majority of the HW programming, NVKM's usual HALs are more elaborate than necessary, resulting in a fair amount of duplicated boilerplate. Adds 'nvkm_rm_gpu' which serves to provide GPU-specific constants and functions in a more streamlined manner. This is initially used in subsequent commits to store engine class IDs, and replace the per-engine/engobj boilerplate with common code for all GSP-RM supported engines - and is further extended when adding GH100, GB10x and GB20x support. 
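A minimal sketch of how code might consume the per-GPU table this commit introduces, assuming only the structures it adds (struct nvkm_rm, struct nvkm_rm_gpu and the gsp->rm pointer allocated in nvkm_gsp_new_()); the helper name nvkm_rm_gpu_of() is hypothetical and not part of the patch:

/* Illustrative sketch only: reach the per-GPU constant table through
 * gsp->rm instead of a chip-specific HAL.  gsp->rm is only allocated
 * when a GSP-RM fwif entry matched, so a NULL return means the device
 * is not running on top of GSP-RM.
 */
static inline const struct nvkm_rm_gpu *
nvkm_rm_gpu_of(struct nvkm_gsp *gsp)
{
	return gsp->rm ? gsp->rm->gpu : NULL;
}

The per-chip nvkm_rm_gpu instances added here start out empty; the class IDs are filled in by the following commits.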
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 6 ++---- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 6 +++--- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 12 +++++++++++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 6 +++--- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 6 +++--- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 7 +++++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 5 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 9 +++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c | 9 +++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 9 +++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 16 ++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 5 ----- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 14 ++++++++++++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 13 ++++++++++++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 9 +++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 6 +++--- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 6 +++--- 17 files changed, 114 insertions(+), 30 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 3fd279be8340..eeaf72f6add3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -209,9 +209,7 @@ struct nvkm_gsp { u8 tpcs; } gr; - const struct nvkm_gsp_rm { - const struct nvkm_rm_api *api; - } *rm; + struct nvkm_rm *rm; struct { struct mutex mutex; @@ -467,7 +465,7 @@ static inline int nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, nvkm_gsp_event_func func, struct nvkm_gsp_event *event) { - const struct nvkm_gsp_rm *rm = device->object.client->gsp->rm; + struct nvkm_rm *rm = device->object.client->gsp->rm; return rm->api->device->event.ctor(device, handle, id, func, event); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index ea2821e7a54e..d7933bfc59fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -22,7 +22,7 @@ #include "priv.h" static const struct nvkm_gsp_func -ad102_gsp_r535_113_01 = { +ad102_gsp = { .flcn = &ga102_gsp_flcn, .fwsec = &ga102_gsp_fwsec, @@ -40,12 +40,12 @@ ad102_gsp_r535_113_01 = { .fini = tu102_gsp_fini, .reset = ga102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &ad10x_gpu, }; static struct nvkm_gsp_fwif ad102_gsps[] = { - { 0, tu102_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true }, + { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 78f2a15f0d42..0f8526aa969f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -102,6 +102,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev) gsp->func->dtor(gsp); nvkm_falcon_dtor(&gsp->falcon); + 
kfree(gsp->rm); return gsp; } @@ -139,7 +140,16 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, return PTR_ERR(fwif); gsp->func = fwif->func; - gsp->rm = gsp->func->rm; + + if (fwif->rm) { + gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL); + if (!gsp->rm) + return -ENOMEM; + + gsp->rm->device = device; + gsp->rm->gpu = fwif->func->rm.gpu; + gsp->rm->api = fwif->rm->api; + } return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000, &gsp->falcon); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c index d9cdec4810b4..77e3501296c9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -39,7 +39,7 @@ ga100_gsp_flcn = { }; static const struct nvkm_gsp_func -ga100_gsp_r535_113_01 = { +ga100_gsp = { .flcn = &ga100_gsp_flcn, .fwsec = &tu102_gsp_fwsec, @@ -56,12 +56,12 @@ ga100_gsp_r535_113_01 = { .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &ga100_gpu, }; static struct nvkm_gsp_fwif ga100_gsps[] = { - { 0, tu102_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index 7b8db70f3cb3..709a046d86bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -150,7 +150,7 @@ ga102_gsp_flcn = { }; static const struct nvkm_gsp_func -ga102_gsp_r535_113_01 = { +ga102_gsp_r535 = { .flcn = &ga102_gsp_flcn, .fwsec = &ga102_gsp_fwsec, @@ -168,7 +168,7 @@ ga102_gsp_r535_113_01 = { .fini = tu102_gsp_fini, .reset = ga102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &ga1xx_gpu, }; static const struct nvkm_gsp_func @@ -178,7 +178,7 @@ ga102_gsp = { static struct nvkm_gsp_fwif ga102_gsps[] = { - { 0, tu102_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" }, { -1, gv100_gsp_nofw, &ga102_gsp }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index e6f0e865848a..de274f6426c1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -2,6 +2,7 @@ #ifndef __NVKM_GSP_PRIV_H__ #define __NVKM_GSP_PRIV_H__ #include +#include enum nvkm_acr_lsf_id; int nvkm_gsp_fwsec_frts(struct nvkm_gsp *); @@ -11,6 +12,7 @@ struct nvkm_gsp_fwif { int version; int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); const struct nvkm_gsp_func *func; + const struct nvkm_rm_impl *rm; const char *ver; bool enable; }; @@ -51,7 +53,9 @@ struct nvkm_gsp_func { int (*fini)(struct nvkm_gsp *, bool suspend); int (*reset)(struct nvkm_gsp *); - const struct nvkm_gsp_rm *rm; + struct { + const struct nvkm_rm_gpu *gpu; + } rm; }; extern const struct nvkm_falcon_func tu102_gsp_flcn; @@ -73,7 +77,6 @@ void r535_gsp_dtor(struct nvkm_gsp *); int r535_gsp_oneinit(struct nvkm_gsp *); int r535_gsp_init(struct nvkm_gsp *); int r535_gsp_fini(struct nvkm_gsp *, bool suspend); -extern const struct nvkm_gsp_rm r535_gsp_rm; int r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume); int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index 1c07740215ec..841b690c0c0a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -2,4 +2,9 @@ # # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o +nvkm-y += nvkm/subdev/gsp/rm/ga100.o +nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o +nvkm-y += nvkm/subdev/gsp/rm/ad10x.o + include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c new file mode 100644 index 000000000000..1e519bf166dd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +const struct nvkm_rm_gpu +ad10x_gpu = { +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c new file mode 100644 index 000000000000..b10e6ff9e9b6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +const struct nvkm_rm_gpu +ga100_gpu = { +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c new file mode 100644 index 000000000000..725ccb2c27dc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +const struct nvkm_rm_gpu +ga1xx_gpu = { +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h new file mode 100644 index 000000000000..32d87ce2b77d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __NVKM_RM_GPU_H__ +#define __NVKM_RM_GPU_H__ +#include "rm.h" + +struct nvkm_rm_gpu { +}; + +extern const struct nvkm_rm_gpu tu1xx_gpu; +extern const struct nvkm_rm_gpu ga100_gpu; +extern const struct nvkm_rm_gpu ga1xx_gpu; +extern const struct nvkm_rm_gpu ad10x_gpu; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 48af5d8d22e8..8ca0f99ccbac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -44,11 +44,6 @@ extern struct dentry *nouveau_debugfs_root; -const struct nvkm_gsp_rm -r535_gsp_rm = { - .api = &r535_rm, -}; - static void r535_gsp_msgq_work(struct work_struct *work) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 39cc3d0c740c..3c17b75b5e37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -4,11 +4,21 @@ */ #include -const struct nvkm_rm_api -r535_rm = { +static const struct nvkm_rm_api +r535_api = { .rpc = &r535_rpc, .ctrl = &r535_ctrl, .alloc = &r535_alloc, .client = &r535_client, .device = &r535_device, }; + +const struct nvkm_rm_impl +r535_rm_tu102 = { + .api = &r535_api, +}; + +const struct nvkm_rm_impl +r535_rm_ga102 = { + .api = &r535_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 20841305fa55..41d4ed70fc10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -6,6 +6,16 @@ #ifndef __NVKM_RM_H__ #define __NVKM_RM_H__ +struct nvkm_rm_impl { + const struct nvkm_rm_api *api; +}; + +struct nvkm_rm { + struct nvkm_device *device; + const struct nvkm_rm_gpu *gpu; + const struct nvkm_rm_api *api; +}; + struct nvkm_rm_api { const struct nvkm_rm_api_rpc { void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc); @@ -45,7 +55,8 @@ struct nvkm_rm_api { } *device; }; -extern const struct nvkm_rm_api r535_rm; +extern const struct nvkm_rm_impl r535_rm_tu102; +extern const struct nvkm_rm_impl r535_rm_ga102; extern const struct nvkm_rm_api_rpc r535_rpc; extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c new file mode 100644 index 000000000000..d455a4f19854 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gpu.h" + +const struct nvkm_rm_gpu +tu1xx_gpu = { +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index 451d3e588d26..fef9c4444017 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -268,7 +268,7 @@ tu102_gsp_flcn = { }; static const struct nvkm_gsp_func -tu102_gsp_r535_113_01 = { +tu102_gsp = { .flcn = &tu102_gsp_flcn, .fwsec = &tu102_gsp_fwsec, @@ -285,7 +285,7 @@ tu102_gsp_r535_113_01 = { .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &tu1xx_gpu, }; static int @@ -336,7 +336,7 @@ done: static struct nvkm_gsp_fwif tu102_gsps[] = { - { 0, tu102_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c index 1bc806a18010..5f279813626f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -22,7 +22,7 @@ #include "priv.h" static const struct nvkm_gsp_func -tu116_gsp_r535_113_01 = { +tu116_gsp = { .flcn = &tu102_gsp_flcn, .fwsec = &tu102_gsp_fwsec, @@ -39,12 +39,12 @@ tu116_gsp_r535_113_01 = { .fini = tu102_gsp_fini, .reset = tu102_gsp_reset, - .rm = &r535_gsp_rm, + .rm.gpu = &tu1xx_gpu, }; static struct nvkm_gsp_fwif tu116_gsps[] = { - { 0, tu102_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" }, + { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} }; -- cgit v1.2.3 From 0fac5141d646638d64851cea4f493204faee1282 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 18 Feb 2025 20:11:55 +1000 Subject: drm/nouveau/gsp: add display class ids to gpu hal Use display class IDs from nvkm_rm_gpu, instead of copying them from the non-GSP HALs. Removes the AD102 display HAL, which is no longer required as there's no support for it without GSP-RM. 
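A short sketch of the pattern this commit establishes, under the assumption that a future chip family only needs to describe its display classes in nvkm_rm_gpu rather than add another nvkm_disp_func; the NEWCHIP_* macros below are placeholders, not real class defines:

/* Hypothetical new chip family: only the class IDs change. */
const struct nvkm_rm_gpu
newchip_gpu = {
	.disp.class = {
		.root = NEWCHIP_DISP,                      /* placeholder IDs */
		.caps = GV100_DISP_CAPS,
		.core = NEWCHIP_DISP_CORE_CHANNEL_DMA,
		.wndw = NEWCHIP_DISP_WINDOW_CHANNEL_DMA,
		.wimm = NEWCHIP_DISP_WINDOW_IMM_CHANNEL_DMA,
		.curs = NEWCHIP_DISP_CURSOR,
	},
};

r535_disp_new() then copies these values into the fixed rm->user[] slots, which is what lets the oclass switch over hw->user[] (and the AD102 display HAL feeding it) be removed.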
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 1 - drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 10 ++--- drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 1 - drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c | 52 ---------------------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 10 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 10 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 10 +++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 35 +++++++++------ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 10 +++++ 9 files changed, 67 insertions(+), 72 deletions(-) delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h index 3e8db8280e2a..7903d7470d19 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h @@ -87,5 +87,4 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); -int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 9093d89b16f3..aa929d3b2941 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2709,7 +2709,7 @@ nv192_chipset = { .timer = { 0x00000001, gk20a_timer_new }, .vfn = { 0x00000001, ga100_vfn_new }, .ce = { 0x0000001f, ga102_ce_new }, - .disp = { 0x00000001, ad102_disp_new }, + .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ad102_gr_new }, @@ -2735,7 +2735,7 @@ nv193_chipset = { .timer = { 0x00000001, gk20a_timer_new }, .vfn = { 0x00000001, ga100_vfn_new }, .ce = { 0x0000001f, ga102_ce_new }, - .disp = { 0x00000001, ad102_disp_new }, + .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ad102_gr_new }, @@ -2761,7 +2761,7 @@ nv194_chipset = { .timer = { 0x00000001, gk20a_timer_new }, .vfn = { 0x00000001, ga100_vfn_new }, .ce = { 0x0000001f, ga102_ce_new }, - .disp = { 0x00000001, ad102_disp_new }, + .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ad102_gr_new }, @@ -2787,7 +2787,7 @@ nv196_chipset = { .timer = { 0x00000001, gk20a_timer_new }, .vfn = { 0x00000001, ga100_vfn_new }, .ce = { 0x0000001f, ga102_ce_new }, - .disp = { 0x00000001, ad102_disp_new }, + .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ad102_gr_new }, @@ -2813,7 +2813,7 @@ nv197_chipset = { .timer = { 0x00000001, gk20a_timer_new }, .vfn = { 0x00000001, ga100_vfn_new }, .ce = { 0x0000001f, ga102_ce_new }, - .disp = { 0x00000001, ad102_disp_new }, + .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 
0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ad102_gr_new }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index 23a10e081081..e1aecd3fe96c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild @@ -27,7 +27,6 @@ nvkm-y += nvkm/engine/disp/gp102.o nvkm-y += nvkm/engine/disp/gv100.o nvkm-y += nvkm/engine/disp/tu102.o nvkm-y += nvkm/engine/disp/ga102.o -nvkm-y += nvkm/engine/disp/ad102.o nvkm-y += nvkm/engine/disp/udisp.o nvkm-y += nvkm/engine/disp/uconn.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c deleted file mode 100644 index 7f300a79aa29..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" -#include "chan.h" - -#include - -#include - -static const struct nvkm_disp_func -ad102_disp = { - .uevent = &gv100_disp_chan_uevent, - .ramht_size = 0x2000, - .root = { 0, 0,AD102_DISP }, - .user = { - {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new }, - {{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs }, - {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm }, - {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core }, - {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw }, - {} - }, -}; - -int -ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_disp **pdisp) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_disp_new(&ad102_disp, device, type, inst, pdisp); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c index 1e519bf166dd..bdeaffbb1077 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -4,6 +4,16 @@ */ #include "gpu.h" +#include + const struct nvkm_rm_gpu ad10x_gpu = { + .disp.class = { + .root = AD102_DISP, + .caps = GV100_DISP_CAPS, + .core = AD102_DISP_CORE_CHANNEL_DMA, + .wndw = GA102_DISP_WINDOW_CHANNEL_DMA, + .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = GA102_DISP_CURSOR, + }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c index 725ccb2c27dc..d4b67ccac608 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -4,6 +4,16 @@ */ #include "gpu.h" +#include + const struct nvkm_rm_gpu ga1xx_gpu = { + .disp.class = { + .root = GA102_DISP, + .caps = GV100_DISP_CAPS, + .core = GA102_DISP_CORE_CHANNEL_DMA, + .wndw = GA102_DISP_WINDOW_CHANNEL_DMA, + .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = GA102_DISP_CURSOR, + }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index 32d87ce2b77d..7f3b5f3fd32b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -7,6 +7,16 @@ #include "rm.h" struct nvkm_rm_gpu { + struct { + struct { + u32 root; + u32 caps; + u32 core; + u32 wndw; + u32 wimm; + u32 curs; + } class; + } disp; }; extern const struct nvkm_rm_gpu tu1xx_gpu; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 475faa35361a..e65f9074e94f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -34,6 +34,8 @@ #include #include +#include + #include #include "nvrm/disp.h" @@ -1676,6 +1678,7 @@ int r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) { + const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu; struct nvkm_disp_func *rm; int ret; @@ -1691,20 +1694,26 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, rm->sor.new = r535_sor_new; rm->ramht_size = hw->ramht_size; - rm->root = hw->root; + rm->root.oclass = gpu->disp.class.root; - for (int i = 0; hw->user[i].ctor; i++) { - switch (hw->user[i].base.oclass & 0xff) { - case 0x73: rm->user[i] = hw->user[i]; break; - case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = 
&r535_core; break; - case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break; - case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break; - case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break; - default: - WARN_ON(1); - continue; - } - } + rm->user[0].base.oclass = gpu->disp.class.caps; + rm->user[0].ctor = gv100_disp_caps_new; + + rm->user[1].base.oclass = gpu->disp.class.core; + rm->user[1].ctor = nvkm_disp_core_new; + rm->user[1].chan = &r535_core; + + rm->user[2].base.oclass = gpu->disp.class.wndw; + rm->user[2].ctor = nvkm_disp_wndw_new; + rm->user[2].chan = &r535_wndw; + + rm->user[3].base.oclass = gpu->disp.class.wimm; + rm->user[3].ctor = nvkm_disp_wndw_new; + rm->user[3].chan = &r535_wimm; + + rm->user[4].base.oclass = gpu->disp.class.curs; + rm->user[4].ctor = nvkm_disp_chan_new; + rm->user[4].chan = &r535_curs; ret = nvkm_disp_new_(rm, device, type, inst, pdisp); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c index d455a4f19854..add98b2f3b6d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -4,6 +4,16 @@ */ #include "gpu.h" +#include + const struct nvkm_rm_gpu tu1xx_gpu = { + .disp.class = { + .root = TU102_DISP, + .caps = GV100_DISP_CAPS, + .core = TU102_DISP_CORE_CHANNEL_DMA, + .wndw = TU102_DISP_WINDOW_CHANNEL_DMA, + .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = TU102_DISP_CURSOR, + }, }; -- cgit v1.2.3 From cd3c62282b61d634b0ed098bb772680088282227 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 18 Feb 2025 20:33:39 +1000 Subject: drm/nouveau/gsp: add usermode class id to gpu hal Use usermode class ID from nvkm_rm_gpu, instead of copying it from the non-GSP HALs. 
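A small sketch of the relationship this commit encodes, assuming only the usermode.class field added to struct nvkm_rm_gpu: the usermode (doorbell) window exposed by r535_vfn_new() is the same 64KiB region on every GSP-RM chip, and only the object class advertised to userspace varies per GPU. The struct and function names below are hypothetical, for illustration only.

struct rm_usermode_map {        /* hypothetical helper type */
	u32 addr;   /* 0x030000: offset of the usermode region */
	u32 size;   /* 0x010000: 64KiB window */
	u32 oclass; /* per-GPU, e.g. TURING_USERMODE_A or AMPERE_USERMODE_A */
};

static struct rm_usermode_map
rm_usermode_map(const struct nvkm_rm_gpu *gpu)
{
	return (struct rm_usermode_map){
		.addr   = 0x030000,
		.size   = 0x010000,
		.oclass = gpu->usermode.class,
	};
}

This is why the diff below can hardcode rm->user.addr and rm->user.size while taking only the class from the GPU table.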
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 2 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 2 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 4 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 2 ++ drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c | 11 +++++++++-- 6 files changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c index bdeaffbb1077..170264d2a61b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -16,4 +16,6 @@ ad10x_gpu = { .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA, .curs = GA102_DISP_CURSOR, }, + + .usermode.class = AMPERE_USERMODE_A, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c index b10e6ff9e9b6..164f46e0a93b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c @@ -4,6 +4,9 @@ */ #include "gpu.h" +#include + const struct nvkm_rm_gpu ga100_gpu = { + .usermode.class = AMPERE_USERMODE_A, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c index d4b67ccac608..f1d4778c4bc3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -16,4 +16,6 @@ ga1xx_gpu = { .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA, .curs = GA102_DISP_CURSOR, }, + + .usermode.class = AMPERE_USERMODE_A, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index 7f3b5f3fd32b..7d005f73326e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -17,6 +17,10 @@ struct nvkm_rm_gpu { u32 curs; } class; } disp; + + struct { + u32 class; + } usermode; }; extern const struct nvkm_rm_gpu tu1xx_gpu; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c index add98b2f3b6d..7aea54dd89ae 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -16,4 +16,6 @@ tu1xx_gpu = { .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA, .curs = TU102_DISP_CURSOR, }, + + .usermode.class = TURING_USERMODE_A, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c index dce337306cab..9446049642e1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c @@ -21,6 +21,8 @@ */ #include "priv.h" +#include + static void r535_vfn_dtor(struct nvkm_vfn *vfn) { @@ -32,6 +34,7 @@ r535_vfn_new(const struct nvkm_vfn_func *hw, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_vfn **pvfn) { + const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu; struct nvkm_vfn_func *rm; int ret; @@ -39,8 +42,12 @@ r535_vfn_new(const struct nvkm_vfn_func *hw, return -ENOMEM; rm->dtor = r535_vfn_dtor; - rm->intr = hw->intr; - rm->user = hw->user; + rm->intr = &tu102_vfn_intr, + rm->user.addr = 0x030000; + rm->user.size = 0x010000; + rm->user.base.minver = -1; + rm->user.base.maxver = -1; + rm->user.base.oclass = 
gpu->usermode.class; ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn); if (ret) -- cgit v1.2.3 From 678bb27e8473c90841636e0232680c7c94f76aec Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 18 Feb 2025 20:51:56 +1000 Subject: drm/nouveau/gsp: add channel class id to gpu hal Use channel class ID from nvkm_rm_gpu, instead of copying it from the non-GSP HALs. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 4 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c | 4 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 4 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 6 ++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 11 ++++------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 4 ++++ 6 files changed, 26 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c index 170264d2a61b..d5b64da712bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -18,4 +18,8 @@ ad10x_gpu = { }, .usermode.class = AMPERE_USERMODE_A, + + .fifo.chan = { + .class = AMPERE_CHANNEL_GPFIFO_A, + }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c index 164f46e0a93b..9bf80e196149 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c @@ -9,4 +9,8 @@ const struct nvkm_rm_gpu ga100_gpu = { .usermode.class = AMPERE_USERMODE_A, + + .fifo.chan = { + .class = AMPERE_CHANNEL_GPFIFO_A, + }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c index f1d4778c4bc3..55c90148a0d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -18,4 +18,8 @@ ga1xx_gpu = { }, .usermode.class = AMPERE_USERMODE_A, + + .fifo.chan = { + .class = AMPERE_CHANNEL_GPFIFO_A, + }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index 7d005f73326e..4aeeb4b32dc8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -21,6 +21,12 @@ struct nvkm_rm_gpu { struct { u32 class; } usermode; + + struct { + struct { + u32 class; + } chan; + } fifo; }; extern const struct nvkm_rm_gpu tu1xx_gpu; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 594a6bbb1db2..28ac97415e8f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -31,6 +31,8 @@ #include #include +#include + #include #include "nvrm/fifo.h" @@ -216,10 +218,6 @@ r535_chan = { .doorbell_handle = r535_chan_doorbell_handle, }; -static const struct nvkm_cgrp_func -r535_cgrp = { -}; - static int r535_engn_nonstall(struct nvkm_engn *engn) { @@ -522,6 +520,7 @@ int r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) { + const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu; struct nvkm_fifo_func *rm; if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL))) @@ -530,9 +529,7 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, 
rm->dtor = r535_fifo_dtor; rm->runl_ctor = r535_fifo_runl_ctor; rm->runl = &r535_runl; - rm->cgrp = hw->cgrp; - rm->cgrp.func = &r535_cgrp; - rm->chan = hw->chan; + rm->chan.user.oclass = gpu->fifo.chan.class; rm->chan.func = &r535_chan; rm->nonstall = &ga100_fifo_nonstall; rm->nonstall_ctor = ga100_fifo_nonstall_ctor; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c index 7aea54dd89ae..bb674b9cef69 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -18,4 +18,8 @@ tu1xx_gpu = { }, .usermode.class = TURING_USERMODE_A, + + .fifo.chan = { + .class = TURING_CHANNEL_GPFIFO_A, + }, }; -- cgit v1.2.3 From 7c2d25f1e408bb7d18b867718f9961de3c2f23da Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sat, 15 Feb 2025 03:21:47 +1000 Subject: drm/nouveau/gsp: add common code for engines/engine objects With minimal to no direct HW programming required, most nvkm_engine implementations are nearly identical when running on top of GSP-RM. Add a common implementation of the boilerplate, and use nvkm_rm_gpu to expose the correct class IDs. As they're now handled by common code, and there's no support for them prior to GSP-RM support - this deletes the GA100 NVDEC/NVJPG/OFA HALs, the GA102 NVENC/OFA HALs, and the AD102 GR/NVDEC/NVENC/NVJPG/OFA HALs. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvif/class.h | 2 + drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 1 - .../gpu/drm/nouveau/include/nvkm/engine/nvdec.h | 2 - .../gpu/drm/nouveau/include/nvkm/engine/nvenc.h | 2 - .../gpu/drm/nouveau/include/nvkm/engine/nvjpg.h | 8 - drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h | 9 - drivers/gpu/drm/nouveau/nvkm/engine/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 38 ----- drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 2 - drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 1 - drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c | 46 ----- drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 2 - drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c | 12 +- drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h | 3 - drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c | 12 +- drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild | 2 - drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h | 3 - drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c | 12 +- drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild | 3 - drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h | 8 - drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild | 4 - drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c | 44 ----- drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c | 44 ----- 
drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h | 8 - drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 4 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 11 ++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c | 189 +++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h | 20 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c | 9 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 11 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 29 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c | 56 ++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h | 38 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c | 33 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c | 33 ++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c | 81 ++------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 14 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 142 ++-------------- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c | 84 ++------- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c | 84 ++------- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c | 81 ++------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c | 80 +-------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 15 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 10 ++ 58 files changed, 546 insertions(+), 1073 deletions(-) delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h delete mode 100644 drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c delete mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 824e052dcc25..71a2a53bff7f 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -189,6 +189,7 @@ #define TURING_A /* cl9097.h */ 0x0000c597 +#define AMPERE_A 0x0000c697 #define AMPERE_B /* cl9097.h */ 0x0000c797 #define ADA_A /* cl9097.h */ 0x0000c997 @@ -246,6 +247,7 @@ #define PASCAL_COMPUTE_B 0x0000c1c0 #define VOLTA_COMPUTE_A 0x0000c3c0 #define TURING_COMPUTE_A 0x0000c5c0 +#define AMPERE_COMPUTE_A 0x0000c6c0 #define AMPERE_COMPUTE_B 0x0000c7c0 #define ADA_COMPUTE_A 0x0000c9c0 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 8145796ffc61..a2333cfe6955 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -55,5 +55,4 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); -int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h index 8d2e170883e1..ca83caa55157 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h @@ -13,7 +13,5 @@ struct nvkm_nvdec { int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **); int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **); -int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **); int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **); -int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h index 018c58fc32ba..1f6eef13f872 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h @@ -13,6 +13,4 @@ struct nvkm_nvenc { int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **); int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **); -int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **); -int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h deleted file mode 100644 index 80b7933a789e..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_NVJPG_H__ -#define __NVKM_NVJPG_H__ -#include - -int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); -int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h deleted file mode 100644 index e72e2115333b..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_OFA_H__ -#define __NVKM_OFA_H__ -#include - -int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); -int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); -int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild 
b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild index 2e48b0816670..ddcf8782d6b6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild @@ -17,8 +17,6 @@ include $(src)/nvkm/engine/msppp/Kbuild include $(src)/nvkm/engine/msvld/Kbuild include $(src)/nvkm/engine/nvenc/Kbuild include $(src)/nvkm/engine/nvdec/Kbuild -include $(src)/nvkm/engine/nvjpg/Kbuild -include $(src)/nvkm/engine/ofa/Kbuild include $(src)/nvkm/engine/sec/Kbuild include $(src)/nvkm/engine/sec2/Kbuild include $(src)/nvkm/engine/sw/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c index 9427a592bd16..1c0c60138706 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c @@ -90,7 +90,7 @@ ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { if (nvkm_gsp_rm(device->gsp)) - return r535_ce_new(&ga100_ce, device, type, inst, pengine); + return -ENODEV; return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c index ce56ede7c2e9..9359c5e7aa3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c @@ -44,7 +44,7 @@ ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { if (nvkm_gsp_rm(device->gsp)) - return r535_ce_new(&ga102_ce, device, type, inst, pengine); + return -ENODEV; return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c index 7c8647dcb349..67d0545cf902 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c @@ -40,7 +40,7 @@ tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { if (nvkm_gsp_rm(device->gsp)) - return r535_ce_new(&tu102_ce, device, type, inst, pengine); + return -ENODEV; return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index aa929d3b2941..0cd20d0f8782 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2529,9 +2529,6 @@ nv170_chipset = { .vfn = { 0x00000001, ga100_vfn_new }, .ce = { 0x000003ff, ga100_ce_new }, .fifo = { 0x00000001, ga100_fifo_new }, - .nvdec = { 0x0000001f, ga100_nvdec_new }, - .nvjpg = { 0x00000001, ga100_nvjpg_new }, - .ofa = { 0x00000001, ga100_ofa_new }, }; static const struct nvkm_device_chip @@ -2561,8 +2558,6 @@ nv172_chipset = { .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ga102_gr_new }, .nvdec = { 0x00000003, ga102_nvdec_new }, - .nvenc = { 0x00000001, ga102_nvenc_new }, - .ofa = { 0x00000001, ga102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2593,8 +2588,6 @@ nv173_chipset = { .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ga102_gr_new }, .nvdec = { 0x00000003, ga102_nvdec_new }, - .nvenc = { 0x00000001, ga102_nvenc_new }, - .ofa = { 0x00000001, ga102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2625,8 +2618,6 @@ nv174_chipset = { .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ga102_gr_new }, .nvdec = { 0x00000003, 
ga102_nvdec_new }, - .nvenc = { 0x00000001, ga102_nvenc_new }, - .ofa = { 0x00000001, ga102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2657,8 +2648,6 @@ nv176_chipset = { .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ga102_gr_new }, .nvdec = { 0x00000003, ga102_nvdec_new }, - .nvenc = { 0x00000001, ga102_nvenc_new }, - .ofa = { 0x00000001, ga102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2689,8 +2678,6 @@ nv177_chipset = { .fifo = { 0x00000001, ga102_fifo_new }, .gr = { 0x00000001, ga102_gr_new }, .nvdec = { 0x00000003, ga102_nvdec_new }, - .nvenc = { 0x00000001, ga102_nvenc_new }, - .ofa = { 0x00000001, ga102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2712,11 +2699,6 @@ nv192_chipset = { .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, - .gr = { 0x00000001, ad102_gr_new }, - .nvdec = { 0x0000000f, ad102_nvdec_new }, - .nvenc = { 0x00000007, ad102_nvenc_new }, - .nvjpg = { 0x0000000f, ad102_nvjpg_new }, - .ofa = { 0x00000001, ad102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2738,11 +2720,6 @@ nv193_chipset = { .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, - .gr = { 0x00000001, ad102_gr_new }, - .nvdec = { 0x0000000f, ad102_nvdec_new }, - .nvenc = { 0x00000007, ad102_nvenc_new }, - .nvjpg = { 0x0000000f, ad102_nvjpg_new }, - .ofa = { 0x00000001, ad102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2764,11 +2741,6 @@ nv194_chipset = { .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, - .gr = { 0x00000001, ad102_gr_new }, - .nvdec = { 0x0000000f, ad102_nvdec_new }, - .nvenc = { 0x00000007, ad102_nvenc_new }, - .nvjpg = { 0x0000000f, ad102_nvjpg_new }, - .ofa = { 0x00000001, ad102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2790,11 +2762,6 @@ nv196_chipset = { .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, - .gr = { 0x00000001, ad102_gr_new }, - .nvdec = { 0x0000000f, ad102_nvdec_new }, - .nvenc = { 0x00000007, ad102_nvenc_new }, - .nvjpg = { 0x0000000f, ad102_nvjpg_new }, - .ofa = { 0x00000001, ad102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; @@ -2816,11 +2783,6 @@ nv197_chipset = { .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, .fifo = { 0x00000001, ga102_fifo_new }, - .gr = { 0x00000001, ad102_gr_new }, - .nvdec = { 0x0000000f, ad102_nvdec_new }, - .nvenc = { 0x00000007, ad102_nvenc_new }, - .nvjpg = { 0x0000000f, ad102_nvjpg_new }, - .ofa = { 0x00000001, ad102_ofa_new }, .sec2 = { 0x00000001, ga102_sec2_new }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h index e42b18820a95..8da5e896dd74 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h @@ -43,8 +43,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 487fcc14b9a9..b5418f05ccd8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -41,7 +41,6 @@ nvkm-y += nvkm/engine/gr/gp10b.o nvkm-y += nvkm/engine/gr/gv100.o nvkm-y += nvkm/engine/gr/tu102.o nvkm-y += nvkm/engine/gr/ga102.o -nvkm-y += 
nvkm/engine/gr/ad102.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c deleted file mode 100644 index 7bfa6240d283..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "gf100.h" - -#include - -#include - -static const struct gf100_gr_func -ad102_gr = { - .sclass = { - { -1, -1, FERMI_TWOD_A }, - { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, - { -1, -1, ADA_A }, - { -1, -1, ADA_COMPUTE_A }, - {} - } -}; - -int -ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_gr_new(&ad102_gr, device, type, inst, pgr); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c index d285c597aff9..2b51f1d0c281 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c @@ -352,7 +352,7 @@ int ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { if (nvkm_gsp_rm(device->gsp)) - return r535_gr_new(&ga102_gr, device, type, inst, pgr); + return -ENODEV; return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index b0e0c9305034..54f686ba39ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -445,6 +445,4 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64); int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gr **); -int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, - struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c index b7a458e9040a..bda8054c6b59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -219,7 +219,7 @@ int tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { if (nvkm_gsp_rm(device->gsp)) - return r535_gr_new(&tu102_gr, device, type, inst, pgr); + 
return -ENODEV; return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild index 5cc317abc42c..37b0cdc760c7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild @@ -2,6 +2,4 @@ nvkm-y += nvkm/engine/nvdec/base.o nvkm-y += nvkm/engine/nvdec/gm107.o nvkm-y += nvkm/engine/nvdec/tu102.o -nvkm-y += nvkm/engine/nvdec/ga100.o nvkm-y += nvkm/engine/nvdec/ga102.o -nvkm-y += nvkm/engine/nvdec/ad102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c deleted file mode 100644 index d72b3aae9a2b..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ad102_nvdec = { - .sclass = { - { -1, -1, NVC9B0_VIDEO_DECODER }, - {} - } -}; - -int -ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvdec **pnvdec) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c deleted file mode 100644 index 932934227b9c..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ga100_nvdec = { - .sclass = { - { -1, -1, NVC6B0_VIDEO_DECODER }, - {} - } -}; - -int -ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvdec **pnvdec) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c index 022a9c824304..eea6368adae2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c @@ -23,16 +23,6 @@ #include -#include - -static const struct nvkm_engine_func -ga102_nvdec_gsp = { - .sclass = { - { -1, -1, NVC7B0_VIDEO_DECODER }, - {} - } -}; - static const struct nvkm_falcon_func ga102_nvdec_flcn = { .disable = gm200_flcn_disable, @@ -67,7 +57,7 @@ ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst struct nvkm_nvdec **pnvdec) { if (nvkm_gsp_rm(device->gsp)) - return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec); + return -ENODEV; return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h index f506ae83bfd7..f8d43e913093 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h @@ -20,7 +20,4 @@ extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[]; int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **); - -int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c index 808c8e010b9e..fe95b6e22f21 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c @@ -23,22 +23,12 @@ #include -#include - -static const struct nvkm_engine_func -tu102_nvdec = { - .sclass = { - { -1, -1, NVC4B0_VIDEO_DECODER }, - {} - } -}; - int tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) { if (nvkm_gsp_rm(device->gsp)) - return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec); + return -ENODEV; return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild index 3d71f2973dab..6dcb20d1d156 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild @@ -2,5 +2,3 @@ nvkm-y += nvkm/engine/nvenc/base.o nvkm-y += nvkm/engine/nvenc/gm107.o nvkm-y += nvkm/engine/nvenc/tu102.o -nvkm-y += nvkm/engine/nvenc/ga102.o -nvkm-y += nvkm/engine/nvenc/ad102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c deleted file mode 100644 index 1b4619ff9e8e..000000000000 --- 
a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ad102_nvenc = { - .sclass = { - { -1, -1, NVC9B7_VIDEO_ENCODER }, - {} - } -}; - -int -ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvenc **pnvenc) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c deleted file mode 100644 index 6463ab8e5871..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ga102_nvenc = { - .sclass = { - { -1, -1, NVC7B7_VIDEO_ENCODER }, - {} - } -}; - -int -ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_nvenc **pnvenc) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h index 7917affc6505..b097e3f2867b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h @@ -18,7 +18,4 @@ extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[]; int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_nvenc **pnvenc); - -int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c index 933864423bb3..8a436b398749 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c @@ -23,22 +23,12 @@ #include -#include - -static const struct nvkm_engine_func -tu102_nvenc = { - .sclass = { - { -1, -1, NVC4B7_VIDEO_ENCODER }, - {} - } -}; - int tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) { if (nvkm_gsp_rm(device->gsp)) - return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc); + return -ENODEV; return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild deleted file mode 100644 index 1d9bddd68605..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/engine/nvjpg/ga100.o -nvkm-y += nvkm/engine/nvjpg/ad102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c deleted file mode 100644 index 62705dc6494c..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ad102_nvjpg = { - .sclass = { - { -1, -1, NVC9D1_VIDEO_NVJPG }, - {} - } -}; - -int -ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c deleted file mode 100644 index f550eb07da5a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ga100_nvjpg = { - .sclass = { - { -1, -1, NVC4D1_VIDEO_NVJPG }, - {} - } -}; - -int -ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h deleted file mode 100644 index 1e80cf70033a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_NVJPG_PRIV_H__ -#define __NVKM_NVJPG_PRIV_H__ -#include - -int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_engine **); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild deleted file mode 100644 index 3faf73b35f5a..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/engine/ofa/ga100.o -nvkm-y += nvkm/engine/ofa/ga102.o -nvkm-y += nvkm/engine/ofa/ad102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c deleted file mode 100644 index 7ac87ef26aec..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ad102_ofa = { - .sclass = { - { -1, -1, NVC9FA_VIDEO_OFA }, - {} - } -}; - -int -ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_ofa_new(&ad102_ofa, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c deleted file mode 100644 index ef474f61a1b5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ga100_ofa = { - .sclass = { - { -1, -1, NVC6FA_VIDEO_OFA }, - {} - } -}; - -int -ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_ofa_new(&ga100_ofa, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c deleted file mode 100644 index bea255529993..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2023 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#include "priv.h" - -#include - -#include - -static const struct nvkm_engine_func -ga102_ofa = { - .sclass = { - { -1, -1, NVC7FA_VIDEO_OFA }, - {} - } -}; - -int -ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, - struct nvkm_engine **pengine) -{ - if (nvkm_gsp_rm(device->gsp)) - return r535_ofa_new(&ga102_ofa, device, type, inst, pengine); - - return -ENODEV; -} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h deleted file mode 100644 index caf29e6bddb4..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_OFA_PRIV_H__ -#define __NVKM_OFA_PRIV_H__ -#include - -int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *, - enum nvkm_subdev_type, int, struct nvkm_engine **); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index 841b690c0c0a..e5d5f8880d31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -1,6 +1,10 @@ # SPDX-License-Identifier: MIT # # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+nvkm-y += nvkm/subdev/gsp/rm/engine.o +nvkm-y += nvkm/subdev/gsp/rm/gr.o +nvkm-y += nvkm/subdev/gsp/rm/nvdec.o +nvkm-y += nvkm/subdev/gsp/rm/nvenc.o nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o nvkm-y += nvkm/subdev/gsp/rm/ga100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c index d5b64da712bc..d699c386adec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -22,4 +22,15 @@ ad10x_gpu = { .fifo.chan = { .class = AMPERE_CHANNEL_GPFIFO_A, }, + + .ce.class = AMPERE_DMA_COPY_B, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = ADA_A, + .compute = ADA_COMPUTE_A, + }, + .nvdec.class = NVC9B0_VIDEO_DECODER, + .nvenc.class = NVC9B7_VIDEO_ENCODER, + .ofa.class = NVC9FA_VIDEO_OFA, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c new file mode 100644 index 000000000000..3b0e83b2f57f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "engine.h" +#include "gpu.h" + +#include +#include + +struct nvkm_rm_engine { + struct nvkm_engine engine; + + struct nvkm_engine_func func; +}; + +struct nvkm_rm_engine_obj { + struct nvkm_object object; + struct nvkm_gsp_object rm; +}; + +static void* +nvkm_rm_engine_obj_dtor(struct nvkm_object *object) +{ + struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object); + + nvkm_gsp_rm_free(&obj->rm); + return obj; +} + +static const struct nvkm_object_func +nvkm_rm_engine_obj = { + .dtor = nvkm_rm_engine_obj_dtor, +}; + +int +nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass, + struct nvkm_object **pobject) +{ + struct nvkm_rm *rm = chan->client->gsp->rm; + const int inst = oclass->engine->subdev.inst; + const u32 class = oclass->base.oclass; + const u32 handle = oclass->handle; + struct nvkm_rm_engine_obj *obj; + int ret; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return -ENOMEM; + + switch (oclass->engine->subdev.type) { + case NVKM_ENGINE_CE: + ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_GR: + ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm); + break; + case NVKM_ENGINE_NVDEC: + ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_NVENC: + ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_NVJPG: + ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm); + break; + case NVKM_ENGINE_OFA: + ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm); + break; + default: + ret = -EINVAL; + WARN_ON(1); + break; + } + + if (ret) { + kfree(obj); + return ret; + } + + nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object); + *pobject = &obj->object; + return 0; +} + +static int +nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); + + return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject); +} + +static void * +nvkm_rm_engine_dtor(struct nvkm_engine *engine) +{ + kfree(engine->func); + return engine; +} + +int +nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm, + enum nvkm_subdev_type 
type, int inst, + const u32 *class, int nclass, struct nvkm_engine *engine) +{ + struct nvkm_engine_func *func; + + func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL); + if (!func) + return -ENOMEM; + + func->dtor = dtor; + + for (int i = 0; i < nclass; i++) { + func->sclass[i].oclass = class[i]; + func->sclass[i].minver = -1; + func->sclass[i].maxver = 0; + func->sclass[i].ctor = nvkm_rm_engine_obj_ctor; + } + + nvkm_engine_ctor(func, rm->device, type, inst, true, engine); + return 0; +} + +static int +nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class, + struct nvkm_engine **pengine) +{ + struct nvkm_engine *engine; + int ret; + + engine = kzalloc(sizeof(*engine), GFP_KERNEL); + if (!engine) + return -ENOMEM; + + ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine); + if (ret) { + kfree(engine); + return ret; + } + + *pengine = engine; + return 0; +} + +int +nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst) +{ + const struct nvkm_rm_gpu *gpu = rm->gpu; + struct nvkm_device *device = rm->device; + + switch (type) { + case NVKM_ENGINE_CE: + if (WARN_ON(inst >= ARRAY_SIZE(device->ce))) + return -EINVAL; + + return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]); + case NVKM_ENGINE_GR: + if (inst != 0) + return -ENODEV; /* MiG not supported, just ignore. */ + + return nvkm_rm_gr_new(rm); + case NVKM_ENGINE_NVDEC: + if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec))) + return -EINVAL; + + return nvkm_rm_nvdec_new(rm, inst); + case NVKM_ENGINE_NVENC: + if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc))) + return -EINVAL; + + return nvkm_rm_nvenc_new(rm, inst); + case NVKM_ENGINE_NVJPG: + if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg))) + return -EINVAL; + + return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]); + case NVKM_ENGINE_OFA: + if (WARN_ON(inst >= ARRAY_SIZE(device->ofa))) + return -EINVAL; + + return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]); + default: + break; + } + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h new file mode 100644 index 000000000000..5b8c9c3901d4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __NVKM_RM_ENGINE_H__ +#define __NVKM_RM_ENGINE_H__ +#include "gpu.h" + +int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *, + enum nvkm_subdev_type type, int inst, + const u32 *class, int nclass, struct nvkm_engine *); +int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst); + +int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *, + struct nvkm_object **); + +int nvkm_rm_gr_new(struct nvkm_rm *); +int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst); +int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c index 9bf80e196149..5e7f18dbf18b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c @@ -13,4 +13,13 @@ ga100_gpu = { .fifo.chan = { .class = AMPERE_CHANNEL_GPFIFO_A, }, + + .ce.class = AMPERE_DMA_COPY_A, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = AMPERE_A, + .compute = AMPERE_COMPUTE_A, + }, + .nvdec.class = NVC6B0_VIDEO_DECODER, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c index 55c90148a0d1..61525d23aaa0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -22,4 +22,15 @@ ga1xx_gpu = { .fifo.chan = { .class = AMPERE_CHANNEL_GPFIFO_A, }, + + .ce.class = AMPERE_DMA_COPY_B, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = AMPERE_B, + .compute = AMPERE_COMPUTE_B, + }, + .nvdec.class = NVC7B0_VIDEO_DECODER, + .nvenc.class = NVC7B7_VIDEO_ENCODER, + .ofa.class = NVC7FA_VIDEO_OFA, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index 4aeeb4b32dc8..a256be42ab6e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -27,6 +27,35 @@ struct nvkm_rm_gpu { u32 class; } chan; } fifo; + + struct { + u32 class; + } ce; + + struct { + struct { + u32 i2m; + u32 twod; + u32 threed; + u32 compute; + } class; + } gr; + + struct { + u32 class; + } nvdec; + + struct { + u32 class; + } nvenc; + + struct { + u32 class; + } nvjpg; + + struct { + u32 class; + } ofa; }; extern const struct nvkm_rm_gpu tu1xx_gpu; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c new file mode 100644 index 000000000000..22aa894da79d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gr.h" + +#include +#include + +static int +nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object); + + return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject); +} + +int +nvkm_rm_gr_new(struct nvkm_rm *rm) +{ + const u32 classes[] = { + rm->gpu->gr.class.i2m, + rm->gpu->gr.class.twod, + rm->gpu->gr.class.threed, + rm->gpu->gr.class.compute, + }; + struct nvkm_gr_func *func; + struct r535_gr *gr; + + func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL); + if (!func) + return -ENOMEM; + + func->dtor = r535_gr_dtor; + func->oneinit = r535_gr_oneinit; + func->units = r535_gr_units; + func->chan_new = r535_gr_chan_new; + + for (int i = 0; i < ARRAY_SIZE(classes); i++) { + func->sclass[i].oclass = classes[i]; + func->sclass[i].minver = -1; + func->sclass[i].maxver = 0; + func->sclass[i].ctor = nvkm_rm_gr_obj_ctor; + } + + gr = kzalloc(sizeof(*gr), GFP_KERNEL); + if (!gr) { + kfree(func); + return -ENOMEM; + } + + nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base); + rm->device->gr = &gr->base; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h new file mode 100644 index 000000000000..9f2b31651019 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_RM_GR_H__ +#define __NVKM_RM_GR_H__ +#include "engine.h" + +#include +#include + +#define R515_GR_MAX_CTXBUFS 9 + +struct r535_gr_chan { + struct nvkm_object object; + struct r535_gr *gr; + + struct nvkm_vmm *vmm; + struct nvkm_chan *chan; + + struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; +}; + +struct r535_gr { + struct nvkm_gr base; + + struct { + u16 bufferId; + u32 size; + u8 page; + u8 align; + bool global; + bool init; + bool ro; + } ctxbuf[R515_GR_MAX_CTXBUFS]; + int ctxbuf_nr; + + struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; +}; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c new file mode 100644 index 000000000000..d9fbfc377864 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "engine.h" +#include + +static void * +nvkm_rm_nvdec_dtor(struct nvkm_engine *engine) +{ + return container_of(engine, struct nvkm_nvdec, engine); +} + +int +nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst) +{ + struct nvkm_nvdec *nvdec; + int ret; + + nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL); + if (!nvdec) + return -ENOMEM; + + ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst, + &rm->gpu->nvdec.class, 1, &nvdec->engine); + if (ret) { + kfree(nvdec); + return ret; + } + + rm->device->nvdec[inst] = nvdec; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c new file mode 100644 index 000000000000..6dfa7b789e07 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "engine.h" +#include + +static void * +nvkm_rm_nvenc_dtor(struct nvkm_engine *engine) +{ + return container_of(engine, struct nvkm_nvenc, engine); +} + +int +nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst) +{ + struct nvkm_nvenc *nvenc; + int ret; + + nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL); + if (!nvenc) + return -ENOMEM; + + ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst, + &rm->gpu->nvenc.class, 1, &nvenc->engine); + if (ret) { + kfree(nvenc); + return ret; + } + + rm->device->nvenc[inst] = nvenc; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c index d60003231e6d..2d1ce9db2dcf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c @@ -19,89 +19,28 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include - -#include -#include -#include +#include #include "nvrm/ce.h" #include "nvrm/engine.h" -struct r535_ce_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_ce_obj_dtor(struct nvkm_object *object) -{ - struct r535_ce_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_ce_obj = { - .dtor = r535_ce_obj_dtor, -}; - static int -r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) +r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *ce) { - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_ce_obj *obj; NVC0B5_ALLOCATION_PARAMETERS *args; - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); args->version = 1; - args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst; + args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst; - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); + return nvkm_gsp_rm_alloc_wr(ce, args); } -static void * -r535_ce_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_ce_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_ce_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} +const struct nvkm_rm_api_engine +r535_ce = { + .alloc = r535_ce_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 28ac97415e8f..98aa272be642 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c 
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -31,7 +31,7 @@ #include #include -#include +#include #include @@ -230,7 +230,7 @@ r535_engn_nonstall(struct nvkm_engn *engn) } static const struct nvkm_engn_func -r535_ce = { +r535_engn_ce = { .nonstall = r535_engn_nonstall, }; @@ -463,9 +463,17 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) continue; } + ret = nvkm_rm_engine_new(gsp->rm, type, inst); + if (ret) { + nvkm_runl_del(runl); + continue; + } + + engn = NULL; + switch (type) { case NVKM_ENGINE_CE: - engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst); + engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst); break; case NVKM_ENGINE_GR: engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index ab941d808e24..3618fa36040c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -19,12 +19,13 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#include +#include #include #include #include #include +#include #include @@ -34,72 +35,6 @@ #define r535_gr(p) container_of((p), struct r535_gr, base) -#define R515_GR_MAX_CTXBUFS 9 - -struct r535_gr { - struct nvkm_gr base; - - struct { - u16 bufferId; - u32 size; - u8 page; - u8 align; - bool global; - bool init; - bool ro; - } ctxbuf[R515_GR_MAX_CTXBUFS]; - int ctxbuf_nr; - - struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; -}; - -struct r535_gr_chan { - struct nvkm_object object; - struct r535_gr *gr; - - struct nvkm_vmm *vmm; - struct nvkm_chan *chan; - - struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; - struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; -}; - -struct r535_gr_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_gr_obj_dtor(struct nvkm_object *object) -{ - struct r535_gr_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_gr_obj = { - .dtor = r535_gr_obj_dtor, -}; - -static int -r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) -{ - struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object); - struct r535_gr_obj *obj; - - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object); - *pobject = &obj->object; - - return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0, - &obj->rm); -} - static void * r535_gr_chan_dtor(struct nvkm_object *object) { @@ -203,7 +138,7 @@ r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm, return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl); } -static int +int r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass, struct nvkm_object **pobject) { @@ -227,7 +162,7 @@ r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm return 0; } -static u64 +u64 r535_gr_units(struct nvkm_gr *gr) { struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp; @@ -235,7 +170,7 @@ r535_gr_units(struct nvkm_gr *gr) return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; } -static int +int r535_gr_oneinit(struct nvkm_gr *base) { NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; @@ -243,6 +178,7 @@ r535_gr_oneinit(struct nvkm_gr *base) struct 
nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; struct nvkm_gsp *gsp = device->gsp; + struct nvkm_rm *rm = gsp->rm; struct nvkm_mmu *mmu = device->mmu; struct { struct nvkm_memory *inst; @@ -250,6 +186,7 @@ r535_gr_oneinit(struct nvkm_gr *base) struct nvkm_gsp_object chan; struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; } golden = {}; + struct nvkm_gsp_object threed; int ret; /* Allocate a channel to use for golden context init. */ @@ -421,30 +358,12 @@ r535_gr_oneinit(struct nvkm_gr *base) goto done; /* Allocate 3D class on channel to trigger golden context init in RM. */ - { - int i; - - for (i = 0; gr->base.func->sclass[i].ctor; i++) { - if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) { - struct nvkm_gsp_object threed; - - ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000, - gr->base.func->sclass[i].oclass, 0, - &threed); - if (ret) - goto done; - - nvkm_gsp_rm_free(&threed); - break; - } - } - - if (WARN_ON(!gr->base.func->sclass[i].ctor)) { - ret = -EINVAL; - goto done; - } - } + ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000, rm->gpu->gr.class.threed, 0, &threed); + if (ret) + goto done; + /* There's no need to keep the golden channel around, as RM caches the context. */ + nvkm_gsp_rm_free(&threed); done: nvkm_gsp_rm_free(&golden.chan); for (int i = gr->ctxbuf_nr - 1; i >= 0; i--) @@ -455,7 +374,7 @@ done: } -static void * +void * r535_gr_dtor(struct nvkm_gr *base) { struct r535_gr *gr = r535_gr(base); @@ -466,38 +385,3 @@ r535_gr_dtor(struct nvkm_gr *base) kfree(gr->base.func); return gr; } - -int -r535_gr_new(const struct gf100_gr_func *hw, - struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) -{ - struct nvkm_gr_func *rm; - struct r535_gr *gr; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_gr_dtor; - rm->oneinit = r535_gr_oneinit; - rm->units = r535_gr_units; - rm->chan_new = r535_gr_chan_new; - - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_gr_obj_ctor; - } - - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - *pgr = &gr->base; - - return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base); -} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c index 05d0916d199e..a8c42ec0367b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c @@ -19,91 +19,27 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include - -#include -#include -#include +#include #include "nvrm/nvdec.h" -struct r535_nvdec_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvdec_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvdec_obj = { - .dtor = r535_nvdec_obj_dtor, -}; - static int -r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) +r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *nvdec) { - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvdec_obj *obj; NV_BSP_ALLOCATION_PARAMETERS *args; - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; + args->engineInstance = inst; - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); + return nvkm_gsp_rm_alloc_wr(nvdec, args); } -static void * -r535_nvdec_dtor(struct nvkm_engine *engine) -{ - struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); - - kfree(nvdec->engine.func); - return nvdec; -} - -int -r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) -{ - struct nvkm_engine_func *rm; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvdec_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvdec_obj_ctor; - } - - if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine); -} +const struct nvkm_rm_api_engine +r535_nvdec = { + .alloc = r535_nvdec_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c index dcf80d1f1e9e..acb3ce8bb9de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c @@ -19,91 +19,27 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include - -#include -#include -#include +#include #include "nvrm/nvenc.h" -struct r535_nvenc_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvenc_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvenc_obj = { - .dtor = r535_nvenc_obj_dtor, -}; - static int -r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) +r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *nvenc) { - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvenc_obj *obj; NV_MSENC_ALLOCATION_PARAMETERS *args; - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; + args->engineInstance = inst; - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); + return nvkm_gsp_rm_alloc_wr(nvenc, args); } -static void * -r535_nvenc_dtor(struct nvkm_engine *engine) -{ - struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); - - kfree(nvenc->engine.func); - return nvenc; -} - -int -r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) -{ - struct nvkm_engine_func *rm; - int nclass; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvenc_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvenc_obj_ctor; - } - - if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) { - kfree(rm); - return -ENOMEM; - } - - return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine); -} +const struct nvkm_rm_api_engine +r535_nvenc = { + .alloc = r535_nvenc_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c index 8a8d7becba93..fbc4080ad8d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c @@ -19,88 +19,27 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include - -#include -#include -#include +#include #include "nvrm/nvjpg.h" -struct r535_nvjpg_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_nvjpg_obj_dtor(struct nvkm_object *object) -{ - struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_nvjpg_obj = { - .dtor = r535_nvjpg_obj_dtor, -}; - static int -r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) +r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *nvjpg) { - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_nvjpg_obj *obj; NV_NVJPG_ALLOCATION_PARAMETERS *args; - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); args->size = sizeof(*args); - args->engineInstance = oclass->engine->subdev.inst; + args->engineInstance = inst; - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); + return nvkm_gsp_rm_alloc_wr(nvjpg, args); } -static void * -r535_nvjpg_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_nvjpg_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_nvjpg_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} +const struct nvkm_rm_api_engine +r535_nvjpg = { + .alloc = r535_nvjpg_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c index 4bd84ff04702..2156808cba4f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c @@ -19,88 +19,26 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include - -#include -#include -#include -#include +#include #include "nvrm/ofa.h" -struct r535_ofa_obj { - struct nvkm_object object; - struct nvkm_gsp_object rm; -}; - -static void * -r535_ofa_obj_dtor(struct nvkm_object *object) -{ - struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object); - - nvkm_gsp_rm_free(&obj->rm); - return obj; -} - -static const struct nvkm_object_func -r535_ofa_obj = { - .dtor = r535_ofa_obj_dtor, -}; - static int -r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, - struct nvkm_object **pobject) +r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *ofa) { - struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent); - struct r535_ofa_obj *obj; NV_OFA_ALLOCATION_PARAMETERS *args; - if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL))) - return -ENOMEM; - - nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object); - *pobject = &obj->object; - - args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass, - sizeof(*args), &obj->rm); + args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); args->size = sizeof(*args); - return nvkm_gsp_rm_alloc_wr(&obj->rm, args); + return nvkm_gsp_rm_alloc_wr(ofa, args); } -static void * -r535_ofa_dtor(struct nvkm_engine *engine) -{ - kfree(engine->func); - return engine; -} - -int -r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) -{ - struct nvkm_engine_func *rm; - int nclass, ret; - - for (nclass = 0; hw->sclass[nclass].oclass; nclass++); - - if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL))) - return -ENOMEM; - - rm->dtor = r535_ofa_dtor; - for (int i = 0; i < nclass; i++) { - rm->sclass[i].minver = hw->sclass[i].minver; - rm->sclass[i].maxver = hw->sclass[i].maxver; - rm->sclass[i].oclass = hw->sclass[i].oclass; - rm->sclass[i].ctor = r535_ofa_obj_ctor; - } - - ret = nvkm_engine_new_(rm, device, type, inst, true, pengine); - if (ret) - kfree(rm); - - return ret; -} +const struct nvkm_rm_api_engine +r535_ofa = { + .alloc = r535_ofa_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 3c17b75b5e37..6de7d1a91119 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -11,6 +11,11 @@ r535_api = { .alloc = &r535_alloc, .client = &r535_client, .device = &r535_device, + .ce = &r535_ce, + .nvdec = &r535_nvdec, + .nvenc = &r535_nvenc, + .nvjpg = &r535_nvjpg, + .ofa = &r535_ofa, }; const struct nvkm_rm_impl diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 41d4ed70fc10..bda22703690a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -53,6 +53,11 @@ struct nvkm_rm_api { void (*dtor)(struct nvkm_gsp_event *); } event; } *device; + + const struct nvkm_rm_api_engine { + int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, + struct nvkm_gsp_object *); + } *ce, *nvdec, *nvenc, *nvjpg, *ofa; }; extern const struct nvkm_rm_impl r535_rm_tu102; @@ -62,4 +67,14 @@ extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; extern const struct 
nvkm_rm_api_device r535_device; +extern const struct nvkm_rm_api_engine r535_ce; +void *r535_gr_dtor(struct nvkm_gr *); +int r535_gr_oneinit(struct nvkm_gr *); +u64 r535_gr_units(struct nvkm_gr *); +int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *, + struct nvkm_object **); +extern const struct nvkm_rm_api_engine r535_nvdec; +extern const struct nvkm_rm_api_engine r535_nvenc; +extern const struct nvkm_rm_api_engine r535_nvjpg; +extern const struct nvkm_rm_api_engine r535_ofa; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c index bb674b9cef69..883b9eddbfe6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -22,4 +22,14 @@ tu1xx_gpu = { .fifo.chan = { .class = TURING_CHANNEL_GPFIFO_A, }, + + .ce.class = TURING_DMA_COPY_A, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = TURING_A, + .compute = TURING_COMPUTE_A, + }, + .nvdec.class = NVC4B0_VIDEO_DECODER, + .nvenc.class = NVC4B7_VIDEO_ENCODER, }; -- cgit v1.2.3 From 38cafe9bd914dd4039c1bdcba8e47b95f961846b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 8 May 2025 06:48:37 +1000 Subject: drm/nouveau/gsp: add defines for rmapi object handles Add header containing defines for RMAPI handles used by NVKM, and use them in place of magic values when calling RM_ALLOC. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h | 17 +++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c | 2 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c | 6 +++--- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 4 ++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 8 files changed, 27 insertions(+), 9 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h new file mode 100644 index 000000000000..50f2f2a86b5a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVKM_RM_HANDLES_H__ +#define __NVKM_RM_HANDLES_H__ + +/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. 
*/ + +#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id)) +#define NVKM_RM_DEVICE 0xde1d0000 +#define NVKM_RM_SUBDEVICE 0x5d1d0000 +#define NVKM_RM_DISP 0x00730000 +#define NVKM_RM_VASPACE 0x90f10000 +#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid)) +#define NVKM_RM_THREED 0x97000000 +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c index c0146c00584d..449338da1795 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c @@ -53,7 +53,7 @@ r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) client->object.client = client; INIT_LIST_HEAD(&client->events); - args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args), + args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_CLIENT(ret), NV01_ROOT, sizeof(*args), &client->object); if (IS_ERR(args)) { r535_gsp_client_dtor(client); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c index 094abf7b5f97..f830e12a8f6e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c @@ -111,8 +111,8 @@ r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) { NV2080_ALLOC_PARAMETERS *args; - return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), - &device->subdevice); + return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0, + sizeof(*args), &device->subdevice); } static int @@ -121,7 +121,7 @@ r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *dev NV0080_ALLOC_PARAMETERS *args; int ret; - args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), + args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args), &device->object); if (IS_ERR(args)) return PTR_ERR(args); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index e65f9074e94f..1ba86e223978 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -1468,7 +1468,7 @@ r535_disp_oneinit(struct nvkm_disp *disp) if (ret) return ret; - ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0, + ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0, &disp->rm.objcom); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 98aa272be642..ad9d93f9820d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -101,7 +101,7 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, if (!chan->rm.mthdbuf.ptr) return -ENOMEM; - args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id, + args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, NVKM_RM_CHAN(chan->id), fifo->func->chan.user.oclass, sizeof(*args), &chan->rm.object); if (WARN_ON(IS_ERR(args))) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index 3618fa36040c..4c0df52e8683 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -205,7 +205,7 @@ r535_gr_oneinit(struct nvkm_gr *base) { NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; - args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000, + args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, NVKM_RM_CHAN(0), device->fifo->func->chan.user.oclass, sizeof(*args), &golden.chan); if (IS_ERR(args)) { @@ -358,7 +358,7 @@ r535_gr_oneinit(struct nvkm_gr *base) goto done; /* Allocate 3D class on channel to trigger golden context init in RM. */ - ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000, rm->gpu->gr.class.threed, 0, &threed); + ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c index c697885c65d3..99af6c19a9a9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -34,7 +34,7 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm) if (ret) return ret; - args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A, + args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, NVKM_RM_VASPACE, FERMI_VASPACE_A, sizeof(*args), &vmm->rm.object); if (IS_ERR(args)) return PTR_ERR(args); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index bda22703690a..4d9e5ea3b2fa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -5,6 +5,7 @@ #include #ifndef __NVKM_RM_H__ #define __NVKM_RM_H__ +#include "handles.h" struct nvkm_rm_impl { const struct nvkm_rm_api *api; -- cgit v1.2.3 From 57fe0d30a0a6fafc0b89ab3d8ec8d81f6742f1b6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for wpr config info + meta init 545.23.06 increases the libos3 heap size requirements, and GH100/GBxxx will need their own implementation entirely. 
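For reference, the sizing rule that moves behind struct nvkm_rm_wpr ends up in tu102_gsp_wpr_heap_size() below; a minimal standalone sketch of that arithmetic, with placeholder values standing in for the real GSP_FW_HEAP_PARAM_* defines:

/* Standalone model of the WPR2 heap sizing; the real constants live in
 * nvrm/gsp.h and the per-version values in the r535_wpr_libos2/libos3
 * tables added by this patch. */
#include <stdio.h>
#include <stdint.h>

#define MiB (1ull << 20)
#define ALIGN_MiB(x) (((x) + MiB - 1) & ~(MiB - 1))

struct rm_wpr {			/* mirrors struct nvkm_rm_wpr */
	uint64_t os_carveout_size;
	uint64_t base_size;
	uint64_t heap_size_min;
};

/* Placeholder per-GB-of-FB and client-allocation parameters. */
static const uint64_t size_per_gb_fb = 1 * MiB;
static const uint64_t client_alloc_size = 0;

static uint64_t wpr_heap_size(const struct rm_wpr *wpr, uint64_t fb_size)
{
	uint64_t fb_gb = (fb_size + (1ull << 30) - 1) >> 30;	/* DIV_ROUND_UP */
	uint64_t size = wpr->os_carveout_size + wpr->base_size +
			ALIGN_MiB(size_per_gb_fb * fb_gb) +
			ALIGN_MiB(client_alloc_size);

	return size > wpr->heap_size_min ? size : wpr->heap_size_min;
}

int main(void)
{
	/* libos2 (Turing)-style numbers: no OS carveout, 8MiB base, 64MiB min. */
	struct rm_wpr libos2 = { 0, 8 * MiB, 64 * MiB };

	printf("wpr2 heap: %llu MiB\n",
	       (unsigned long long)(wpr_heap_size(&libos2, 8ull << 30) / MiB));
	return 0;
}
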
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 4 - drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 3 - drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 4 - drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 6 -- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 92 +------------------- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h | 8 ++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 18 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 8 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 97 +++++++++++++++++++++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 3 - 12 files changed, 134 insertions(+), 113 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index eeaf72f6add3..ef781c4ca11f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -17,6 +17,9 @@ struct nvkm_gsp_mem { dma_addr_t addr; }; +int nvkm_gsp_mem_ctor(struct nvkm_gsp *, size_t size, struct nvkm_gsp_mem *); +void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *); + struct nvkm_gsp_radix3 { struct nvkm_gsp_mem lvl0; struct nvkm_gsp_mem lvl1; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index d7933bfc59fd..8ab02d683c90 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -28,10 +28,6 @@ ad102_gsp = { .sig_section = ".fwsignature_ad10x", - .wpr_heap.os_carveout_size = 20 << 20, - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 84 << 20, - .booter.ctor = ga102_gsp_booter_ctor, .dtor = r535_gsp_dtor, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 0f8526aa969f..be6bbf06d58b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -148,6 +148,7 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, gsp->rm->device = device; gsp->rm->gpu = fwif->func->rm.gpu; + gsp->rm->wpr = fwif->rm->wpr; gsp->rm->api = fwif->rm->api; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c index 77e3501296c9..a6836a85b2ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -45,9 +45,6 @@ ga100_gsp = { .sig_section = ".fwsignature_ga100", - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 64 << 20, - .booter.ctor = tu102_gsp_booter_ctor, .dtor = r535_gsp_dtor, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index 709a046d86bf..202b5bdc3980 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -156,10 +156,6 @@ ga102_gsp_r535 = { .sig_section = ".fwsignature_ga10x", - .wpr_heap.os_carveout_size = 20 << 20, - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 84 << 20, - .booter.ctor = ga102_gsp_booter_ctor, .dtor = r535_gsp_dtor, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index de274f6426c1..d42ae235d2f4 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -36,12 +36,6 @@ struct nvkm_gsp_func { char *sig_section; - struct { - u32 os_carveout_size; - u32 base_size; - u64 min_size; - } wpr_heap; - struct { int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *, struct nvkm_falcon *, struct nvkm_falcon_fw *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 8ca0f99ccbac..ec69fdb9492a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -231,7 +231,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) return 0; } -static void +void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem) { if (mem->data) { @@ -260,7 +260,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem) * so we take a device reference to ensure its lifetime. The reference is * dropped in the destructor. */ -static int +int nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) { mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); @@ -1129,55 +1129,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) return 0; } -static int -r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) -{ - GspFwWprMeta *meta; - int ret; - - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); - if (ret) - return ret; - - meta = gsp->wpr_meta.data; - - meta->magic = GSP_FW_WPR_META_MAGIC; - meta->revision = GSP_FW_WPR_META_REVISION; - - meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; - meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; - - meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; - meta->sizeOfBootloader = gsp->boot.fw.size; - meta->bootloaderCodeOffset = gsp->boot.code_offset; - meta->bootloaderDataOffset = gsp->boot.data_offset; - meta->bootloaderManifestOffset = gsp->boot.manifest_offset; - - meta->sysmemAddrOfSignature = gsp->sig.addr; - meta->sizeOfSignature = gsp->sig.size; - - meta->gspFwRsvdStart = gsp->fb.heap.addr; - meta->nonWprHeapOffset = gsp->fb.heap.addr; - meta->nonWprHeapSize = gsp->fb.heap.size; - meta->gspFwWprStart = gsp->fb.wpr2.addr; - meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; - meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; - meta->gspFwOffset = gsp->fb.wpr2.elf.addr; - meta->bootBinOffset = gsp->fb.wpr2.boot.addr; - meta->frtsOffset = gsp->fb.wpr2.frts.addr; - meta->frtsSize = gsp->fb.wpr2.frts.size; - meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); - meta->fbSize = gsp->fb.size; - meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; - meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; - meta->bootCount = 0; - meta->partitionRpcAddr = 0; - meta->partitionRpcRequestOffset = 0; - meta->partitionRpcReplyOffset = 0; - meta->verified = 0; - return 0; -} - static int r535_gsp_shared_init(struct nvkm_gsp *gsp) { @@ -2179,49 +2130,10 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) /* Release FW images - we've copied them to DMA buffers now. */ nvkm_gsp_dtor_fws(gsp); - /* Calculate FB layout. 
*/ - gsp->fb.wpr2.frts.size = 0x100000; - gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; - - gsp->fb.wpr2.boot.size = gsp->boot.fw.size; - gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); - - gsp->fb.wpr2.elf.size = gsp->fw.len; - gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); - - { - u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); - - gsp->fb.wpr2.heap.size = - gsp->func->wpr_heap.os_carveout_size + - gsp->func->wpr_heap.base_size + - ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + - ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); - - gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size); - } - - gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); - gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); - - gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); - gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; - - gsp->fb.heap.size = 0x100000; - gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; - - ret = nvkm_gsp_fwsec_frts(gsp); - if (WARN_ON(ret)) - return ret; - ret = r535_gsp_libos_init(gsp); if (WARN_ON(ret)) return ret; - ret = r535_gsp_wpr_meta_init(gsp); - if (WARN_ON(ret)) - return ret; - ret = r535_gsp_rpc_set_system_info(gsp); if (WARN_ON(ret)) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h index 085a7dac0405..b6683a5bf870 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h @@ -814,4 +814,12 @@ typedef struct GSP_MSG_QUEUE_ELEMENT NvU32 elemCount; // Number of message queue elements this message has. 
NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8); } GSP_MSG_QUEUE_ELEMENT; + +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20) + +#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada + +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u) +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u) #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 6de7d1a91119..60e8678b7913 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -4,6 +4,22 @@ */ #include +#include "nvrm/gsp.h" + +static const struct nvkm_rm_wpr +r535_wpr_libos2 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB, +}; + +static const struct nvkm_rm_wpr +r535_wpr_libos3 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, +}; + static const struct nvkm_rm_api r535_api = { .rpc = &r535_rpc, @@ -20,10 +36,12 @@ r535_api = { const struct nvkm_rm_impl r535_rm_tu102 = { + .wpr = &r535_wpr_libos2, .api = &r535_api, }; const struct nvkm_rm_impl r535_rm_ga102 = { + .wpr = &r535_wpr_libos3, .api = &r535_api, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 4d9e5ea3b2fa..1a2fec3935a4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -8,15 +8,23 @@ #include "handles.h" struct nvkm_rm_impl { + const struct nvkm_rm_wpr *wpr; const struct nvkm_rm_api *api; }; struct nvkm_rm { struct nvkm_device *device; const struct nvkm_rm_gpu *gpu; + const struct nvkm_rm_wpr *wpr; const struct nvkm_rm_api *api; }; +struct nvkm_rm_wpr { + u32 os_carveout_size; + u32 base_size; + u64 heap_size_min; +}; + struct nvkm_rm_api { const struct nvkm_rm_api_rpc { void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index fef9c4444017..a07f59e5ef7a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -24,6 +24,8 @@ #include #include +#include + #include #include #include @@ -195,6 +197,69 @@ tu102_gsp_init(struct nvkm_gsp *gsp) return r535_gsp_init(gsp); } +static int +tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta); + if (ret) + return ret; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; + meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; + + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->gspFwRsvdStart = gsp->fb.heap.addr; + meta->nonWprHeapOffset = gsp->fb.heap.addr; + meta->nonWprHeapSize = gsp->fb.heap.size; + 
meta->gspFwWprStart = gsp->fb.wpr2.addr; + meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; + meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; + meta->gspFwOffset = gsp->fb.wpr2.elf.addr; + meta->bootBinOffset = gsp->fb.wpr2.boot.addr; + meta->frtsOffset = gsp->fb.wpr2.frts.addr; + meta->frtsSize = gsp->fb.wpr2.frts.size; + meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); + meta->fbSize = gsp->fb.size; + meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->bootCount = 0; + meta->partitionRpcAddr = 0; + meta->partitionRpcRequestOffset = 0; + meta->partitionRpcReplyOffset = 0; + meta->verified = 0; + return 0; +} + +static u64 +tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp) +{ + u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); + u64 heap_size; + + heap_size = gsp->rm->wpr->os_carveout_size + + gsp->rm->wpr->base_size + + ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) + + ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20); + + return max(heap_size, gsp->rm->wpr->heap_size_min); +} + static u64 tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size) { @@ -241,6 +306,35 @@ tu102_gsp_oneinit(struct nvkm_gsp *gsp) if (ret) return ret; + /* Calculate FB layout. */ + gsp->fb.wpr2.frts.size = 0x100000; + gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size; + + gsp->fb.wpr2.boot.size = gsp->boot.fw.size; + gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000); + + gsp->fb.wpr2.elf.size = gsp->fw.len; + gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000); + + gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp); + + gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000); + gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000); + + gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000); + gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr; + + gsp->fb.heap.size = 0x100000; + gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size; + + ret = tu102_gsp_wpr_meta_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_fwsec_frts(gsp); + if (WARN_ON(ret)) + return ret; + /* Reset GSP into RISC-V mode. */ ret = gsp->func->reset(gsp); if (ret) @@ -274,9 +368,6 @@ tu102_gsp = { .sig_section = ".fwsignature_tu10x", - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 64 << 20, - .booter.ctor = tu102_gsp_booter_ctor, .dtor = r535_gsp_dtor, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c index 5f279813626f..9e897bdcb647 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -28,9 +28,6 @@ tu116_gsp = { .sig_section = ".fwsignature_tu11x", - .wpr_heap.base_size = 8 << 20, - .wpr_heap.min_size = 64 << 20, - .booter.ctor = tu102_gsp_booter_ctor, .dtor = r535_gsp_dtor, -- cgit v1.2.3 From aa733b3ee6bd4450cd3278e12df5e547e8bbe92a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for gsp.set_system_info() 545.23.06 has incompatible changes to GspSystemInfo. 
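This and the later hal patches all follow the same shape: callers reach version-specific behaviour through function pointers hung off nvkm_rm, so a newer RM release only overrides the hooks whose RPC layout changed. A cut-down standalone sketch of that dispatch (simplified names, not the actual nvkm types):

/* Toy model of the rm->api->gsp->... dispatch introduced here. */
#include <stdio.h>

struct gsp;				/* stand-in device context */

struct rm_api_gsp {			/* mirrors nvkm_rm_api_gsp */
	int (*set_system_info)(struct gsp *);
};

struct rm_api {
	const struct rm_api_gsp *gsp;
};

static int r535_set_system_info(struct gsp *gsp)
{
	(void)gsp;
	printf("535.xx system-info RPC format\n");
	return 0;
}

static const struct rm_api_gsp r535_api_gsp = {
	.set_system_info = r535_set_system_info,
};

static const struct rm_api r535_api = { .gsp = &r535_api_gsp };

/* The init path now calls through the table rather than the r535 function
 * directly, so a later RM version can plug in its own hook. */
static int gsp_oneinit(struct gsp *gsp, const struct rm_api *rmapi)
{
	return rmapi->gsp->set_system_info(gsp);
}

int main(void)
{
	return gsp_oneinit(NULL, &r535_api);
}
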
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 10 ++++++++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 5 +++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index ec69fdb9492a..f574a3ad2082 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -871,7 +871,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) } static int -r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) +r535_gsp_set_system_info(struct nvkm_gsp *gsp) { struct nvkm_device *device = gsp->subdev.device; struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); @@ -2080,6 +2080,7 @@ int r535_gsp_oneinit(struct nvkm_gsp *gsp) { struct nvkm_device *device = gsp->subdev.device; + const struct nvkm_rm_api *rmapi = gsp->rm->api; const u8 *data; u64 size; int ret; @@ -2134,7 +2135,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) if (WARN_ON(ret)) return ret; - ret = r535_gsp_rpc_set_system_info(gsp); + ret = rmapi->gsp->set_system_info(gsp); if (WARN_ON(ret)) return ret; @@ -2146,3 +2147,8 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) idr_init(&gsp->client_id.idr); return 0; } + +const struct nvkm_rm_api_gsp +r535_gsp = { + .set_system_info = r535_gsp_set_system_info, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 60e8678b7913..efedd387fcc5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -22,6 +22,7 @@ r535_wpr_libos3 = { static const struct nvkm_rm_api r535_api = { + .gsp = &r535_gsp, .rpc = &r535_rpc, .ctrl = &r535_ctrl, .alloc = &r535_alloc, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 1a2fec3935a4..4a37904f7f9c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -26,6 +26,10 @@ struct nvkm_rm_wpr { }; struct nvkm_rm_api { + const struct nvkm_rm_api_gsp { + int (*set_system_info)(struct nvkm_gsp *); + } *gsp; + const struct nvkm_rm_api_rpc { void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc); void *(*push)(struct nvkm_gsp *gsp, void *argv, @@ -71,6 +75,7 @@ struct nvkm_rm_api { extern const struct nvkm_rm_impl r535_rm_tu102; extern const struct nvkm_rm_impl r535_rm_ga102; +extern const struct nvkm_rm_api_gsp r535_gsp; extern const struct nvkm_rm_api_rpc r535_rpc; extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; -- cgit v1.2.3 From 7bb77eacdb85d7628a541938f5752528eb1d4029 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for gsp.get_static_info() 545.23.06 has incompatible changes to a number of definitions that impact the layout of GspStaticConfigInfo. 
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 65 ++++++++++++---------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index f574a3ad2082..ed0fbfbd5168 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -165,35 +165,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp) return ret; } -static int -r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) +static void +r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp, + const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info) { - GspStaticConfigInfo *rpc; int last_usable = -1; - rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); - if (IS_ERR(rpc)) - return PTR_ERR(rpc); - - gsp->internal.client.object.client = &gsp->internal.client; - gsp->internal.client.object.parent = NULL; - gsp->internal.client.object.handle = rpc->hInternalClient; - gsp->internal.client.gsp = gsp; - - gsp->internal.device.object.client = &gsp->internal.client; - gsp->internal.device.object.parent = &gsp->internal.client.object; - gsp->internal.device.object.handle = rpc->hInternalDevice; - - gsp->internal.device.subdevice.client = &gsp->internal.client; - gsp->internal.device.subdevice.parent = &gsp->internal.device.object; - gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; - - gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; - gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; - - for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { - NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = - &rpc->fbRegionInfoParams.fbRegion[i]; + for (int i = 0; i < info->numFBRegions; i++) { + const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i]; nvkm_debug(&gsp->subdev, "fb region %d: " "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, @@ -215,10 +194,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) } if (last_usable >= 0) { - u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; + u32 rsvd_base = info->fbRegion[last_usable].limit + 1; gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; } +} + +static int +r535_gsp_get_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams); for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { if (rpc->gpcInfo.gpcMask & BIT(gpc)) { @@ -277,9 +284,10 @@ static 
int r535_gsp_postinit(struct nvkm_gsp *gsp) { struct nvkm_device *device = gsp->subdev.device; + const struct nvkm_rm_api *rmapi = gsp->rm->api; int ret; - ret = r535_gsp_rpc_get_gsp_static_info(gsp); + ret = rmapi->gsp->get_static_info(gsp); if (WARN_ON(ret)) return ret; @@ -2151,4 +2159,5 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) const struct nvkm_rm_api_gsp r535_gsp = { .set_system_info = r535_gsp_set_system_info, + .get_static_info = r535_gsp_get_static_info, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 4a37904f7f9c..445793d8147b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -28,6 +28,7 @@ struct nvkm_rm_wpr { struct nvkm_rm_api { const struct nvkm_rm_api_gsp { int (*set_system_info)(struct nvkm_gsp *); + int (*get_static_info)(struct nvkm_gsp *); } *gsp; const struct nvkm_rm_api_rpc { -- cgit v1.2.3 From 20235009c11d839b371298ae96dfd12ef2355c27 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for gsp.xlat_mc_engine_idx() 545.23.06 has incompatible changes to MC_ENGINE_IDX definitions. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 79 ++++++++++++---------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 45 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index ed0fbfbd5168..65640b43f477 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -86,10 +86,52 @@ r535_gsp_intr(struct nvkm_inth *inth) return IRQ_HANDLED; } +static bool +r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst) +{ + switch (mc_engine_idx) { + case MC_ENGINE_IDX_GSP: + *ptype = NVKM_SUBDEV_GSP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_DISP: + *ptype = NVKM_ENGINE_DISP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: + *ptype = NVKM_ENGINE_CE; + *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0; + return true; + case MC_ENGINE_IDX_GR0: + *ptype = NVKM_ENGINE_GR; + *pinst = 0; + return true; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + *ptype = NVKM_ENGINE_NVDEC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0; + return true; + case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: + *ptype = NVKM_ENGINE_NVENC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC; + return true; + case MC_ENGINE_IDX_NVJPEG0 ... 
MC_ENGINE_IDX_NVJPEG7: + *ptype = NVKM_ENGINE_NVJPG; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0; + return true; + case MC_ENGINE_IDX_OFA0: + *ptype = NVKM_ENGINE_OFA; + *pinst = 0; + return true; + default: + return false; + } +} + static int r535_gsp_intr_get_table(struct nvkm_gsp *gsp) { NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; + const struct nvkm_rm_api *rmapi = gsp->rm->api; int ret = 0; ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, @@ -112,42 +154,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp) ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); - switch (ctrl->table[i].engineIdx) { - case MC_ENGINE_IDX_GSP: - type = NVKM_SUBDEV_GSP; - inst = 0; - break; - case MC_ENGINE_IDX_DISP: - type = NVKM_ENGINE_DISP; - inst = 0; - break; - case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: - type = NVKM_ENGINE_CE; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; - break; - case MC_ENGINE_IDX_GR0: - type = NVKM_ENGINE_GR; - inst = 0; - break; - case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: - type = NVKM_ENGINE_NVDEC; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; - break; - case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: - type = NVKM_ENGINE_NVENC; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; - break; - case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: - type = NVKM_ENGINE_NVJPG; - inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; - break; - case MC_ENGINE_IDX_OFA0: - type = NVKM_ENGINE_OFA; - inst = 0; - break; - default: + if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst)) continue; - } if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { ret = -ENOSPC; @@ -2160,4 +2168,5 @@ const struct nvkm_rm_api_gsp r535_gsp = { .set_system_info = r535_gsp_set_system_info, .get_static_info = r535_gsp_get_static_info, + .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 445793d8147b..aecb066982d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -29,6 +29,7 @@ struct nvkm_rm_api { const struct nvkm_rm_api_gsp { int (*set_system_info)(struct nvkm_gsp *); int (*get_static_info)(struct nvkm_gsp *); + bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst); } *gsp; const struct nvkm_rm_api_rpc { -- cgit v1.2.3 From e95bb6b6ebde01c4642f132fbd5c0af19658b622 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for gsp.drop_send_user_shared_data() 545.23.06 removes NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, but has another event (NVLINK_FAULT_UP) in its place. 
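Unlike the hooks added so far, this one is optional and the caller NULL-checks it, so an RM version without the event simply leaves the pointer unset. A small standalone sketch of that convention (simplified names):

/* Toy model of an optional hal hook that newer RM versions omit. */
#include <stdio.h>

struct gsp;

struct rm_api_gsp {
	/* NULL when the RM version no longer has the event to drop. */
	void (*drop_send_user_shared_data)(struct gsp *);
};

static void r535_drop_send_user_shared_data(struct gsp *gsp)
{
	(void)gsp;
	printf("registering no-op handler for GSP_SEND_USER_SHARED_DATA\n");
}

static void gsp_oneinit(struct gsp *gsp, const struct rm_api_gsp *api)
{
	if (api->drop_send_user_shared_data)
		api->drop_send_user_shared_data(gsp);
}

int main(void)
{
	const struct rm_api_gsp r535 = {
		.drop_send_user_shared_data = r535_drop_send_user_shared_data,
	};
	const struct rm_api_gsp newer = {
		.drop_send_user_shared_data = NULL,	/* hook omitted */
	};

	gsp_oneinit(NULL, &r535);
	gsp_oneinit(NULL, &newer);
	return 0;
}
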
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 11 ++++++++++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 65640b43f477..f7cc8e03d999 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -2092,6 +2092,12 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) nvkm_gsp_mem_dtor(&gsp->logrm); } +static void +r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp) +{ + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); +} + int r535_gsp_oneinit(struct nvkm_gsp *gsp) { @@ -2139,7 +2145,9 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); + if (rmapi->gsp->drop_send_user_shared_data) + rmapi->gsp->drop_send_user_shared_data(gsp); + ret = r535_gsp_rm_boot_ctor(gsp); if (ret) return ret; @@ -2169,4 +2177,5 @@ r535_gsp = { .set_system_info = r535_gsp_set_system_info, .get_static_info = r535_gsp_get_static_info, .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx, + .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index aecb066982d8..4a27e8bfafcc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -30,6 +30,7 @@ struct nvkm_rm_api { int (*set_system_info)(struct nvkm_gsp *); int (*get_static_info)(struct nvkm_gsp *); bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst); + void (*drop_send_user_shared_data)(struct nvkm_gsp *); } *gsp; const struct nvkm_rm_api_rpc { -- cgit v1.2.3 From 8f8d9bca2ff0e85d0818fcdf7f7f0068fe290c92 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for disp.bl_ctrl() 545.23.06 has incompatible changes to NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 57 ++++++++++++---------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 5 ++ 3 files changed, 37 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 1ba86e223978..1e9bbfd402d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -252,47 +252,47 @@ r535_core = { }; static int -r535_sor_bl_set(struct nvkm_ior *sor, int lvl) +r535_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval) { - struct nvkm_disp *disp = sor->disp; + u32 cmd = set ? 
NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS : + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS; NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + int ret; - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, - sizeof(*ctrl)); + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl)); if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - ctrl->displayId = BIT(sor->asy.outp->index); - ctrl->brightness = lvl; + ctrl->displayId = BIT(display_id); + ctrl->brightness = *pval; - return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) + return ret; + + *pval = ctrl->brightness; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; } static int -r535_sor_bl_get(struct nvkm_ior *sor) +r535_sor_bl_set(struct nvkm_ior *sor, int lvl) { struct nvkm_disp *disp = sor->disp; - NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; - int ret, lvl; - - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); + const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api; - ctrl->displayId = BIT(sor->asy.outp->index); + return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl); +} - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } +static int +r535_sor_bl_get(struct nvkm_ior *sor) +{ + struct nvkm_disp *disp = sor->disp; + const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api; + int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl); - lvl = ctrl->brightness; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return lvl; + return (ret == 0) ? 
lvl : ret; } static const struct nvkm_ior_func_bl @@ -1722,3 +1722,8 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, mutex_init(&(*pdisp)->super.mutex); //XXX return ret; } + +const struct nvkm_rm_api_disp +r535_disp = { + .bl_ctrl = r535_bl_ctrl, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index efedd387fcc5..9eff944f6c39 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -28,6 +28,7 @@ r535_api = { .alloc = &r535_alloc, .client = &r535_client, .device = &r535_device, + .disp = &r535_disp, .ce = &r535_ce, .nvdec = &r535_nvdec, .nvenc = &r535_nvenc, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 4a27e8bfafcc..2386e419be62 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -70,6 +70,10 @@ struct nvkm_rm_api { } event; } *device; + const struct nvkm_rm_api_disp { + int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); + } *disp; + const struct nvkm_rm_api_engine { int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, struct nvkm_gsp_object *); @@ -84,6 +88,7 @@ extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; extern const struct nvkm_rm_api_device r535_device; +extern const struct nvkm_rm_api_disp r535_disp; extern const struct nvkm_rm_api_engine r535_ce; void *r535_gr_dtor(struct nvkm_gr *); int r535_gr_oneinit(struct nvkm_gr *); -- cgit v1.2.3 From a3f3232903213530f9ae675de4a3be0970ea9eea Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add hal for disp.dp.set_indexed_link_rates() 545.23.06 has incompatible changes to NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES. 
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 21 ++++++++++++++++----- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 5 +++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 1e9bbfd402d3..9eff06e12fca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -1021,15 +1021,11 @@ r535_dp_train(struct nvkm_outp *outp, bool retrain) } static int -r535_dp_rates(struct nvkm_outp *outp) +r535_dp_set_indexed_link_rates(struct nvkm_outp *outp) { NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; struct nvkm_disp *disp = outp->disp; - if (outp->conn->info.type != DCB_CONNECTOR_eDP || - !outp->dp.rates || outp->dp.rate[0].dpcd < 0) - return 0; - if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) return -EINVAL; @@ -1045,6 +1041,18 @@ r535_dp_rates(struct nvkm_outp *outp) return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); } +static int +r535_dp_rates(struct nvkm_outp *outp) +{ + struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm; + + if (outp->conn->info.type != DCB_CONNECTOR_eDP || + !outp->dp.rates || outp->dp.rate[0].dpcd < 0) + return 0; + + return rm->api->disp->dp.set_indexed_link_rates(outp); +} + static int r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) { @@ -1726,4 +1734,7 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, const struct nvkm_rm_api_disp r535_disp = { .bl_ctrl = r535_bl_ctrl, + .dp = { + .set_indexed_link_rates = r535_dp_set_indexed_link_rates, + } }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 2386e419be62..9df95c5b9961 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -6,6 +6,7 @@ #ifndef __NVKM_RM_H__ #define __NVKM_RM_H__ #include "handles.h" +struct nvkm_outp; struct nvkm_rm_impl { const struct nvkm_rm_wpr *wpr; @@ -72,6 +73,10 @@ struct nvkm_rm_api { const struct nvkm_rm_api_disp { int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); + + struct { + int (*set_indexed_link_rates)(struct nvkm_outp *); + } dp; } *disp; const struct nvkm_rm_api_engine { -- cgit v1.2.3 From 6854ce2c942da5180936c676ff29c4dc1899a844 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for disp.get_static_info() 550.40.07 has incompatible changes to NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO. 
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 37 ++++++++++++++-------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 2 ++ 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 9eff06e12fca..82fc159ec070 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -1440,11 +1440,31 @@ r535_disp_init(struct nvkm_disp *disp) return 0; } +static int +r535_disp_get_static_info(struct nvkm_disp *disp) +{ + NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->wndw.mask = ctrl->windowPresentMask; + disp->wndw.nr = fls(disp->wndw.mask); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + static int r535_disp_oneinit(struct nvkm_disp *disp) { struct nvkm_device *device = disp->engine.subdev.device; struct nvkm_gsp *gsp = device->gsp; + const struct nvkm_rm_api *rmapi = gsp->rm->api; NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl; int ret, i; @@ -1481,19 +1501,9 @@ r535_disp_oneinit(struct nvkm_disp *disp) if (ret) return ret; - { - NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; - - ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, - sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - disp->wndw.mask = ctrl->windowPresentMask; - disp->wndw.nr = fls(disp->wndw.mask); - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); - } + ret = rmapi->disp->get_static_info(disp); + if (ret) + return ret; /* */ { @@ -1733,6 +1743,7 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, const struct nvkm_rm_api_disp r535_disp = { + .get_static_info = r535_disp_get_static_info, .bl_ctrl = r535_bl_ctrl, .dp = { .set_indexed_link_rates = r535_dp_set_indexed_link_rates, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 9df95c5b9961..b9c775aec58e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -72,6 +72,8 @@ struct nvkm_rm_api { } *device; const struct nvkm_rm_api_disp { + int (*get_static_info)(struct nvkm_disp *); + int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); struct { -- cgit v1.2.3 From e0ed9434aa45f09143647d9d3439fbb7baa097a0 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for disp.chan.set_pushbuf() 550.40.07 has incompatible changes to NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER. 
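Passing the class, instance and (optional) pushbuffer memory explicitly lets the PIO cursor channel and the DMA channels share one hook, with the cursor passing NULL. A standalone sketch of that shape (simplified types and example class numbers, not the real nvkm ones):

/* Toy model of the set_pushbuf(disp, oclass, inst, memory) hook. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct memory {			/* stand-in for struct nvkm_memory */
	uint64_t addr;
	uint64_t size;
};

static int set_pushbuf(unsigned oclass, int inst, const struct memory *memory)
{
	if (memory)
		printf("chan %04x.%d: pushbuf %#llx+%#llx\n", oclass, inst,
		       (unsigned long long)memory->addr,
		       (unsigned long long)memory->size);
	else
		printf("chan %04x.%d: PIO, no pushbuf\n", oclass, inst);
	return 0;
}

int main(void)
{
	struct memory push = { 0x1000000, 0x1000 };

	set_pushbuf(0xc37d, 0, &push);	/* DMA-style channel (example class) */
	set_pushbuf(0xc37a, 0, NULL);	/* cursor/PIO-style channel (example) */
	return 0;
}
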
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 27 +++++++++++++--------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 5 ++++ 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 82fc159ec070..389b2738f711 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -70,9 +70,9 @@ r535_chan_fini(struct nvkm_disp_chan *chan) } static int -r535_chan_push(struct nvkm_disp_chan *chan) +r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory) { - struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp; + struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp; NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, @@ -81,8 +81,8 @@ r535_chan_push(struct nvkm_disp_chan *chan) if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - if (chan->memory) { - switch (nvkm_memory_target(chan->memory)) { + if (memory) { + switch (nvkm_memory_target(memory)) { case NVKM_MEM_TARGET_NCOH: ctrl->addressSpace = ADDR_SYSMEM; ctrl->cacheSnoop = 0; @@ -99,13 +99,13 @@ r535_chan_push(struct nvkm_disp_chan *chan) return -EINVAL; } - ctrl->physicalAddr = nvkm_memory_addr(chan->memory); - ctrl->limit = nvkm_memory_size(chan->memory) - 1; + ctrl->physicalAddr = nvkm_memory_addr(memory); + ctrl->limit = nvkm_memory_size(memory) - 1; } - ctrl->hclass = chan->object.oclass; - ctrl->channelInstance = chan->head; - ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0; + ctrl->hclass = oclass; + ctrl->channelInstance = inst; + ctrl->valid = ((oclass & 0xff) != 0x7a) ? 
1 : 0; return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); } @@ -113,10 +113,11 @@ r535_chan_push(struct nvkm_disp_chan *chan) static int r535_curs_init(struct nvkm_disp_chan *chan) { + const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api; NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args; int ret; - ret = r535_chan_push(chan); + ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL); if (ret) return ret; @@ -166,10 +167,11 @@ r535_dmac_fini(struct nvkm_disp_chan *chan) static int r535_dmac_init(struct nvkm_disp_chan *chan) { + const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api; NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; int ret; - ret = r535_chan_push(chan); + ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory); if (ret) return ret; @@ -1747,5 +1749,8 @@ r535_disp = { .bl_ctrl = r535_bl_ctrl, .dp = { .set_indexed_link_rates = r535_dp_set_indexed_link_rates, + }, + .chan = { + .set_pushbuf = r535_disp_chan_set_pushbuf, } }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index b9c775aec58e..a8f070871d80 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -79,6 +79,11 @@ struct nvkm_rm_api { struct { int (*set_indexed_link_rates)(struct nvkm_outp *); } dp; + + struct { + int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst, + struct nvkm_memory *); + } chan; } *disp; const struct nvkm_rm_api_engine { -- cgit v1.2.3 From 727937b337feff684818ff7728b86d2196c87012 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for fifo.xlat_rm_engine_type() 550.40.07 has incompatible changes to RM_ENGINE_TYPE defines. 
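The rewritten translation table relies on a local macro expanding inside each case label, so one table line supplies the case, both outputs and the return value. A standalone sketch of the same trick on a cut-down enum (not the real RM_ENGINE_TYPE values):

/* Toy model of the case-label macro used in r535_fifo_xlat_rm_engine_type(). */
#include <stdio.h>

enum rm_engine { RM_GR0, RM_COPY0, RM_COPY1, RM_NVDEC0 };
enum nvkm_type { NVKM_GR, NVKM_CE, NVKM_NVDEC };

static int xlat(enum rm_engine rm, enum nvkm_type *ptype)
{
#define XLAT(RM, NVKM, INST) \
	RM_##RM:             \
		*ptype = NVKM_##NVKM; \
		return INST

	switch (rm) {
	case XLAT(   GR0, GR,    0);
	case XLAT( COPY0, CE,    0);
	case XLAT( COPY1, CE,    1);
	case XLAT(NVDEC0, NVDEC, 0);
	default:
		return -1;
	}
#undef XLAT
}

int main(void)
{
	enum nvkm_type type;
	int inst = xlat(RM_COPY1, &type);

	printf("type=%d inst=%d\n", type, inst);
	return 0;
}
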
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 101 ++++++++++----------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 6 ++ 3 files changed, 56 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index ad9d93f9820d..136a64d82973 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -324,56 +324,52 @@ r535_runl = { }; static int -r535_fifo_2080_type(enum nvkm_subdev_type type, int inst) +r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080) { - switch (type) { - case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0; - case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst; - case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2; - case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst; - case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst; - case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst; - case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA; - case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW; - default: - break; - } +#define RM_ENGINE_TYPE(RM,NVKM,INST) \ + RM_ENGINE_TYPE_##RM: \ + *ptype = NVKM_ENGINE_##NVKM; \ + *p2080 = NV2080_ENGINE_TYPE_##RM; \ + return INST - WARN_ON(1); - return -EINVAL; -} - -static int -r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype) -{ switch (rm) { - case RM_ENGINE_TYPE_GR0: - *ptype = NVKM_ENGINE_GR; - return 0; - case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9: - *ptype = NVKM_ENGINE_CE; - return rm - RM_ENGINE_TYPE_COPY0; - case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7: - *ptype = NVKM_ENGINE_NVDEC; - return rm - RM_ENGINE_TYPE_NVDEC0; - case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2: - *ptype = NVKM_ENGINE_NVENC; - return rm - RM_ENGINE_TYPE_NVENC0; - case RM_ENGINE_TYPE_SW: - *ptype = NVKM_ENGINE_SW; - return 0; - case RM_ENGINE_TYPE_SEC2: - *ptype = NVKM_ENGINE_SEC2; - return 0; - case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7: - *ptype = NVKM_ENGINE_NVJPG; - return rm - RM_ENGINE_TYPE_NVJPEG0; - case RM_ENGINE_TYPE_OFA: - *ptype = NVKM_ENGINE_OFA; - return 0; + case RM_ENGINE_TYPE( GR0, GR, 0); + case RM_ENGINE_TYPE( COPY0, CE, 0); + case RM_ENGINE_TYPE( COPY1, CE, 1); + case RM_ENGINE_TYPE( COPY2, CE, 2); + case RM_ENGINE_TYPE( COPY3, CE, 3); + case RM_ENGINE_TYPE( COPY4, CE, 4); + case RM_ENGINE_TYPE( COPY5, CE, 5); + case RM_ENGINE_TYPE( COPY6, CE, 6); + case RM_ENGINE_TYPE( COPY7, CE, 7); + case RM_ENGINE_TYPE( COPY8, CE, 8); + case RM_ENGINE_TYPE( COPY9, CE, 9); + case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0); + case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1); + case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2); + case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3); + case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4); + case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5); + case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6); + case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7); + case RM_ENGINE_TYPE( NVENC0, NVENC, 0); + case RM_ENGINE_TYPE( NVENC1, NVENC, 1); + case RM_ENGINE_TYPE( NVENC2, NVENC, 2); + case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0); + case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1); + case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2); + case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3); + case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4); + case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5); + case 
RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6); + case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7); + case RM_ENGINE_TYPE( SW, SW, 0); + case RM_ENGINE_TYPE( SEC2, SEC2, 0); + case RM_ENGINE_TYPE( OFA, OFA, 0); default: return -EINVAL; } +#undef RM_ENGINE_TYPE } static int @@ -410,7 +406,9 @@ static int r535_fifo_runl_ctor(struct nvkm_fifo *fifo) { struct nvkm_subdev *subdev = &fifo->engine.subdev; - struct nvkm_gsp *gsp = subdev->device->gsp; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_rm *rm = gsp->rm; struct nvkm_runl *runl; struct nvkm_engn *engn; u32 cgids = 2048; @@ -450,19 +448,13 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) if (!runl) continue; - inst = r535_fifo_engn_type(rmid, &type); + inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080); if (inst < 0) { nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid); nvkm_runl_del(runl); continue; } - nv2080 = r535_fifo_2080_type(type, inst); - if (nv2080 < 0) { - nvkm_runl_del(runl); - continue; - } - ret = nvkm_rm_engine_new(gsp->rm, type, inst); if (ret) { nvkm_runl_del(runl); @@ -544,3 +536,8 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, return nvkm_fifo_new_(rm, device, type, inst, pfifo); } + +const struct nvkm_rm_api_fifo +r535_fifo = { + .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 9eff944f6c39..8d41df85fb19 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -29,6 +29,7 @@ r535_api = { .client = &r535_client, .device = &r535_device, .disp = &r535_disp, + .fifo = &r535_fifo, .ce = &r535_ce, .nvdec = &r535_nvdec, .nvenc = &r535_nvenc, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index a8f070871d80..23a9a2043d9c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -86,6 +86,11 @@ struct nvkm_rm_api { } chan; } *disp; + const struct nvkm_rm_api_fifo { + int (*xlat_rm_engine_type)(u32 rm_engine_type, + enum nvkm_subdev_type *, int *nv2080_type); + } *fifo; + const struct nvkm_rm_api_engine { int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, struct nvkm_gsp_object *); @@ -101,6 +106,7 @@ extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; extern const struct nvkm_rm_api_device r535_device; extern const struct nvkm_rm_api_disp r535_disp; +extern const struct nvkm_rm_api_fifo r535_fifo; extern const struct nvkm_rm_api_engine r535_ce; void *r535_gr_dtor(struct nvkm_gr *); int r535_gr_oneinit(struct nvkm_gr *); -- cgit v1.2.3 From f308c9ffdc2b4e7532504899d058bbb850353577 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for fifo.ectx_size() NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO is moved to NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO in 550.40.07. 
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 3 ++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 136a64d82973..55022ad67208 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -507,7 +507,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); } - return r535_fifo_ectx_size(fifo); + return rm->api->fifo->ectx_size(fifo); } static void @@ -540,4 +540,5 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device, const struct nvkm_rm_api_fifo r535_fifo = { .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type, + .ectx_size = r535_fifo_ectx_size, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 23a9a2043d9c..8783c21af0e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -89,6 +89,7 @@ struct nvkm_rm_api { const struct nvkm_rm_api_fifo { int (*xlat_rm_engine_type)(u32 rm_engine_type, enum nvkm_subdev_type *, int *nv2080_type); + int (*ectx_size)(struct nvkm_fifo *); } *fifo; const struct nvkm_rm_api_engine { -- cgit v1.2.3 From 2f9974fdd56a5cb65476707fa21636b3a90e1dc9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for gr.get_ctxbufs_info() NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO has incompatible changes in 550.40.07. 
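The per-buffer handling that moves into r535_gr_get_ctxbuf_info() keeps the existing size-to-page/alignment rule; a quick standalone model of just that rule follows (buffer IDs and the rest of the bookkeeping omitted):

/* Toy model of the page/alignment selection in r535_gr_get_ctxbuf_info(). */
#include <stdio.h>
#include <stdint.h>

static void pick_page_align(uint32_t size, int attribute_cb,
			    uint8_t *page, uint8_t *align)
{
	/* Large buffers get 2MiB pages, medium 64KiB, everything else 4KiB. */
	if (size >= 1u << 21)
		*page = 21;
	else if (size >= 1u << 16)
		*page = 16;
	else
		*page = 12;

	/* The attribute CB is aligned to its own (rounded-up power-of-two)
	 * size; all other buffers align to their page size. */
	if (attribute_cb) {
		uint8_t b = 0;

		while ((1u << b) < size)
			b++;
		*align = b;
	} else {
		*align = *page;
	}
}

int main(void)
{
	uint8_t page, align;

	pick_page_align(0x180000, 0, &page, &align);
	printf("size 0x180000 -> page %d, align %d\n", page, align);
	return 0;
}
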
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 4 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 186 ++++++++++++--------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 7 + 4 files changed, 114 insertions(+), 84 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 55022ad67208..58a47c62690a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -250,7 +250,7 @@ r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *c } static const struct nvkm_engn_func -r535_gr = { +r535_engn_gr = { .nonstall = r535_engn_nonstall, .ctor2 = r535_gr_ctor, }; @@ -468,7 +468,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst); break; case NVKM_ENGINE_GR: - engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst); + engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst); break; case NVKM_ENGINE_NVDEC: case NVKM_ENGINE_NVENC: diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index 4c0df52e8683..e0fa88aa608f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -170,10 +170,106 @@ r535_gr_units(struct nvkm_gr *gr) return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; } +static void +r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i, + struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + static const struct { + u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ + u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */ + bool global; + bool init; + bool ro; + } map[] = { +#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \ + .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \ + .global = (G), .init = (I), .ro = (R) } +#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R)) + /* global init ro */ + _A( GRAPHICS, MAIN, false, true, false), + _B( PATCH, false, true, false), + _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false), + _B( PAGEPOOL, true, false, false), + _B( ATTRIBUTE_CB, true, false, false), + _B( RTV_CB_GLOBAL, true, false, false), + _B( FECS_EVENT, true, true, false), + _B( PRIV_ACCESS_MAP, true, true, true), +#undef _B +#undef _A + }; + u32 size = info->size; + u8 align, page; + int id; + + for (id = 0; id < ARRAY_SIZE(map); id++) { + if (map[id].id0 == i) + break; + } + + nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i, + size, (id < ARRAY_SIZE(map)) ? 
"*" : ""); + if (id >= ARRAY_SIZE(map)) + return; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN) + size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */ + + if (size >= 1 << 21) page = 21; + else if (size >= 1 << 16) page = 16; + else page = 12; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB) + align = order_base_2(size); + else + align = page; + + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + return; + + gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1; + gr->ctxbuf[gr->ctxbuf_nr].size = size; + gr->ctxbuf[gr->ctxbuf_nr].page = page; + gr->ctxbuf[gr->ctxbuf_nr].align = align; + gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global; + gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init; + gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro; + gr->ctxbuf_nr++; + + if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) { + if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) + return; + + gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1]; + gr->ctxbuf[gr->ctxbuf_nr].bufferId = + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; + gr->ctxbuf_nr++; + } +} + +static int +r535_gr_get_ctxbufs_info(struct r535_gr *gr) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_gsp *gsp = subdev->device->gsp; + + info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + sizeof(*info)); + if (WARN_ON(IS_ERR(info))) + return PTR_ERR(info); + + for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) + r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); + return 0; +} + int r535_gr_oneinit(struct nvkm_gr *base) { - NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; struct r535_gr *gr = container_of(base, typeof(*gr), base); struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; @@ -269,88 +365,9 @@ r535_gr_oneinit(struct nvkm_gr *base) * * Also build the information that'll be used to create channel contexts. 
*/ - info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, - NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, - sizeof(*info)); - if (WARN_ON(IS_ERR(info))) { - ret = PTR_ERR(info); + ret = gsp->rm->api->gr->get_ctxbufs_info(gr); + if (ret) goto done; - } - - for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) { - static const struct { - u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ - u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */ - bool global; - bool init; - bool ro; - } map[] = { -#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \ - .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \ - .global = (G), .init = (I), .ro = (R) } -#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R)) - /* global init ro */ - _A( GRAPHICS, MAIN, false, true, false), - _B( PATCH, false, true, false), - _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false), - _B( PAGEPOOL, true, false, false), - _B( ATTRIBUTE_CB, true, false, false), - _B( RTV_CB_GLOBAL, true, false, false), - _B( FECS_EVENT, true, true, false), - _B( PRIV_ACCESS_MAP, true, true, true), -#undef _B -#undef _A - }; - u32 size = info->engineContextBuffersInfo[0].engine[i].size; - u8 align, page; - int id; - - for (id = 0; id < ARRAY_SIZE(map); id++) { - if (map[id].id0 == i) - break; - } - - nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i, - size, (id < ARRAY_SIZE(map)) ? "*" : ""); - if (id >= ARRAY_SIZE(map)) - continue; - - if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN) - size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */ - - if (size >= 1 << 21) page = 21; - else if (size >= 1 << 16) page = 16; - else page = 12; - - if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB) - align = order_base_2(size); - else - align = page; - - if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) - continue; - - gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1; - gr->ctxbuf[gr->ctxbuf_nr].size = size; - gr->ctxbuf[gr->ctxbuf_nr].page = page; - gr->ctxbuf[gr->ctxbuf_nr].align = align; - gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global; - gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init; - gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro; - gr->ctxbuf_nr++; - - if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) { - if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf))) - continue; - - gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1]; - gr->ctxbuf[gr->ctxbuf_nr].bufferId = - NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; - gr->ctxbuf_nr++; - } - } - - nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); /* Promote golden context to RM. 
*/ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan); @@ -385,3 +402,8 @@ r535_gr_dtor(struct nvkm_gr *base) kfree(gr->base.func); return gr; } + +const struct nvkm_rm_api_gr +r535_gr = { + .get_ctxbufs_info = r535_gr_get_ctxbufs_info, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 8d41df85fb19..43cf44bf3abb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -31,6 +31,7 @@ r535_api = { .disp = &r535_disp, .fifo = &r535_fifo, .ce = &r535_ce, + .gr = &r535_gr, .nvdec = &r535_nvdec, .nvenc = &r535_nvenc, .nvjpg = &r535_nvjpg, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 8783c21af0e7..f085e25e4e08 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -7,6 +7,7 @@ #define __NVKM_RM_H__ #include "handles.h" struct nvkm_outp; +struct r535_gr; struct nvkm_rm_impl { const struct nvkm_rm_wpr *wpr; @@ -96,6 +97,11 @@ struct nvkm_rm_api { int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst, struct nvkm_gsp_object *); } *ce, *nvdec, *nvenc, *nvjpg, *ofa; + + const struct nvkm_rm_api_gr { + int (*get_ctxbufs_info)(struct r535_gr *); + } *gr; + }; extern const struct nvkm_rm_impl r535_rm_tu102; @@ -109,6 +115,7 @@ extern const struct nvkm_rm_api_device r535_device; extern const struct nvkm_rm_api_disp r535_disp; extern const struct nvkm_rm_api_fifo r535_fifo; extern const struct nvkm_rm_api_engine r535_ce; +extern const struct nvkm_rm_api_gr r535_gr; void *r535_gr_dtor(struct nvkm_gr *); int r535_gr_oneinit(struct nvkm_gr *); u64 r535_gr_units(struct nvkm_gr *); -- cgit v1.2.3 From 207c445b31aa6e080b51881a288bfffafd6011a4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for gsp.set_rmargs() 555.42.02 has incompatible changes to GSP_ARGUMENTS_CACHED. 
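The split here keeps the part every version needs (allocating the shared-memory area and the 0x1000-byte rmargs buffer) in a common rmargs_init() path and pushes only the formatting of GSP_ARGUMENTS_CACHED behind rm->api->gsp->set_rmargs(), since that structure is what 555.42.02 reshapes. Below is a small sketch of that ownership split in userspace C; the my_gsp type, the resume-flag byte and the return codes are invented for illustration, and a plain malloc stands in for nvkm_gsp_mem_ctor().

/* Sketch: the common path allocates, the per-version hook only formats. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct my_gsp {
	void *rmargs;					/* stands in for gsp->rmargs.data */
	void (*set_rmargs)(struct my_gsp *, int resume);	/* per-version hook */
};

static void r535_set_rmargs_sketch(struct my_gsp *gsp, int resume)
{
	/* purely illustrative layout: byte 0 carries the resume flag */
	memset(gsp->rmargs, 0, 0x1000);
	((unsigned char *)gsp->rmargs)[0] = resume ? 1 : 0;
}

/* common code shared by every RM version, like r535_gsp_rmargs_init() */
static int rmargs_init_sketch(struct my_gsp *gsp, int resume)
{
	if (!resume) {
		gsp->rmargs = malloc(0x1000);	/* real code: nvkm_gsp_mem_ctor() */
		if (!gsp->rmargs)
			return -1;
	}
	gsp->set_rmargs(gsp, resume);
	return 0;
}

int main(void)
{
	struct my_gsp gsp = { .set_rmargs = r535_set_rmargs_sketch };

	if (rmargs_init_sketch(&gsp, 0) == 0)
		printf("resume flag byte: %d\n", ((unsigned char *)gsp.rmargs)[0]);
	free(gsp.rmargs);
	return 0;
}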
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 1 - .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 36 +++++++++++++--------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 2 +- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index d42ae235d2f4..c8429863b642 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -71,7 +71,6 @@ void r535_gsp_dtor(struct nvkm_gsp *); int r535_gsp_oneinit(struct nvkm_gsp *); int r535_gsp_init(struct nvkm_gsp *); int r535_gsp_fini(struct nvkm_gsp *, bool suspend); -int r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume); int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index f7cc8e03d999..0c05a0448766 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -1196,23 +1196,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp) return 0; } -int -r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +static void +r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume) { - GSP_ARGUMENTS_CACHED *args; - int ret; + GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data; - if (!resume) { - ret = r535_gsp_shared_init(gsp); - if (ret) - return ret; - - ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); - if (ret) - return ret; - } - - args = gsp->rmargs.data; args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; args->messageQueueInitArguments.cmdQueueOffset = @@ -1229,7 +1217,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) args->srInitArguments.flags = 0; args->srInitArguments.bInPMTransition = 1; } +} + +static int +r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) +{ + int ret; + + if (!resume) { + ret = r535_gsp_shared_init(gsp); + if (ret) + return ret; + + ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); + if (ret) + return ret; + } + gsp->rm->api->gsp->set_rmargs(gsp, resume); return 0; } @@ -2174,6 +2179,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) const struct nvkm_rm_api_gsp r535_gsp = { + .set_rmargs = r535_gsp_set_rmargs, .set_system_info = r535_gsp_set_system_info, .get_static_info = r535_gsp_get_static_info, .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index f085e25e4e08..3d677e5bdd2c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -29,6 +29,7 @@ struct nvkm_rm_wpr { struct nvkm_rm_api { const struct nvkm_rm_api_gsp { + void (*set_rmargs)(struct nvkm_gsp *, bool resume); int (*set_system_info)(struct nvkm_gsp *); int (*get_static_info)(struct nvkm_gsp *); bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index a07f59e5ef7a..b080a8da1caf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -183,7 +183,7 @@ tu102_gsp_init(struct nvkm_gsp *gsp) mbox0 = lower_32_bits(gsp->wpr_meta.addr); mbox1 = upper_32_bits(gsp->wpr_meta.addr); } else { - r535_gsp_rmargs_init(gsp, true); + gsp->rm->api->gsp->set_rmargs(gsp, true); mbox0 = lower_32_bits(gsp->sr.meta.addr); mbox1 = upper_32_bits(gsp->sr.meta.addr); -- cgit v1.2.3 From c21b039715ce9f4a10b77782e636c39dd8869869 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 29 Jan 2025 10:29:40 +1000 Subject: drm/nouveau/gsp: add hals for fbsr.suspend/resume() 555.42.02 has incompatible changes to FBSR. At the same time, move the calling of FBSR functions from the instmem subdev's suspend/resume paths, to GSP's. This is needed to fix ordering issues that arise from changes to FBSR in newer RM versions. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 1 + .../gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 5 ---- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c | 29 +++++++++++----------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 11 ++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 6 +++++ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 8 +++--- 7 files changed, 38 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index ef781c4ca11f..40e1b5300dff 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -133,6 +133,7 @@ struct nvkm_gsp { struct sg_table sgt; struct nvkm_gsp_radix3 radix3; struct nvkm_gsp_mem meta; + struct sg_table fbsr; } sr; struct { diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index e10cbd9203ec..7d93c742ee59 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h @@ -24,11 +24,6 @@ struct nvkm_instmem { struct nvkm_ramht *ramht; struct nvkm_memory *ramro; struct nvkm_memory *ramfc; - - struct { - struct sg_table fbsr; - bool fbsr_valid; - } rm; }; u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c index b2f22bd93f4e..0e436c4fb4e0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -201,21 +201,18 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) } static void -r535_instmem_resume(struct nvkm_instmem *imem) +r535_fbsr_resume(struct nvkm_gsp *gsp) { /* RM has restored VRAM contents already, so just need to free the sysmem buffer. 
*/ - if (imem->rm.fbsr_valid) { - nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr); - imem->rm.fbsr_valid = false; - } + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr); } static int -r535_instmem_suspend(struct nvkm_instmem *imem) +r535_fbsr_suspend(struct nvkm_gsp *gsp) { - struct nvkm_subdev *subdev = &imem->subdev; + struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device; - struct nvkm_gsp *gsp = device->gsp; + struct nvkm_instmem *imem = device->imem; struct nvkm_instobj *iobj; struct fbsr fbsr = {}; struct fbsr_item *item, *temp; @@ -256,7 +253,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem) fbsr.size += gsp->fb.bios.vga_workspace.size; nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size); - ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr); + ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr); if (ret) goto done; @@ -265,7 +262,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem) if (ret) goto done_sgt; - ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size); + ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size); if (WARN_ON(ret)) goto done_sgt; @@ -276,12 +273,10 @@ r535_instmem_suspend(struct nvkm_instmem *imem) goto done_sgt; } - imem->rm.fbsr_valid = true; - /* Cleanup everything except the sysmem backup, which will be removed after resume. */ done_sgt: if (ret) /* ... unless we failed already. */ - nvkm_gsp_sg_free(device, &imem->rm.fbsr); + nvkm_gsp_sg_free(device, &gsp->sr.fbsr); done: list_for_each_entry_safe(item, temp, &fbsr.items, head) { list_del(&item->head); @@ -293,6 +288,12 @@ done: return ret; } +const struct nvkm_rm_api_fbsr +r535_fbsr = { + .suspend = r535_fbsr_suspend, + .resume = r535_fbsr_resume, +}; + static void * r535_instmem_dtor(struct nvkm_instmem *imem) { @@ -313,8 +314,6 @@ r535_instmem_new(const struct nvkm_instmem_func *hw, rm->dtor = r535_instmem_dtor; rm->fini = hw->fini; - rm->suspend = r535_instmem_suspend; - rm->resume = r535_instmem_resume; rm->memory_new = hw->memory_new; rm->memory_wrap = hw->memory_wrap; rm->zero = false; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 0c05a0448766..64e9ecf93441 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -1730,6 +1730,7 @@ lvl1_fail: int r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) { + struct nvkm_rm *rm = gsp->rm; int ret; if (suspend) { @@ -1754,6 +1755,14 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) sr->revision = GSP_FW_SR_META_REVISION; sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; sr->sizeOfSuspendResumeData = len; + + ret = rm->api->fbsr->suspend(gsp); + if (ret) { + nvkm_gsp_mem_dtor(&gsp->sr.meta); + nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); + nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); + return ret; + } } ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); @@ -1787,6 +1796,8 @@ r535_gsp_init(struct nvkm_gsp *gsp) done: if (gsp->sr.meta.data) { + gsp->rm->api->fbsr->resume(gsp); + nvkm_gsp_mem_dtor(&gsp->sr.meta); nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c index 43cf44bf3abb..a4190676e1ad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c @@ -28,6 +28,7 @@ r535_api = { 
.alloc = &r535_alloc, .client = &r535_client, .device = &r535_device, + .fbsr = &r535_fbsr, .disp = &r535_disp, .fifo = &r535_fifo, .ce = &r535_ce, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 3d677e5bdd2c..ce04ed9e3c27 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -73,6 +73,11 @@ struct nvkm_rm_api { } event; } *device; + const struct nvkm_rm_api_fbsr { + int (*suspend)(struct nvkm_gsp *); + void (*resume)(struct nvkm_gsp *); + } *fbsr; + const struct nvkm_rm_api_disp { int (*get_static_info)(struct nvkm_disp *); @@ -113,6 +118,7 @@ extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; extern const struct nvkm_rm_api_device r535_device; +extern const struct nvkm_rm_api_fbsr r535_fbsr; extern const struct nvkm_rm_api_disp r535_disp; extern const struct nvkm_rm_api_fifo r535_fifo; extern const struct nvkm_rm_api_engine r535_ce; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index a2cd3330efc6..2f55bab8e132 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -182,9 +182,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) int ret; if (suspend) { - ret = imem->func->suspend(imem); - if (ret) - return ret; + if (imem->func->suspend) { + ret = imem->func->suspend(imem); + if (ret) + return ret; + } imem->suspend = true; } -- cgit v1.2.3 From 1cf5940bdbee4446df387a8b70cda05bb920f693 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for disp.get_supported() 555.42.02 has incompatible changes to NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 42 ++++++++++++---------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 389b2738f711..40c50d9fca0b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -1442,6 +1442,21 @@ r535_disp_init(struct nvkm_disp *disp) return 0; } +static int +r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask) +{ + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + *pmask = ctrl->displayMask; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + static int r535_disp_get_static_info(struct nvkm_disp *disp) { @@ -1468,6 +1483,7 @@ r535_disp_oneinit(struct nvkm_disp *disp) struct nvkm_gsp *gsp = device->gsp; const struct nvkm_rm_api *rmapi = gsp->rm->api; NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl; + unsigned long mask; int ret, i; /* RAMIN. 
*/ @@ -1634,25 +1650,14 @@ r535_disp_oneinit(struct nvkm_disp *disp) return ret; } - /* */ - { - NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; - unsigned long mask; - int i; - - ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, - NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - mask = ctrl->displayMask; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + ret = rmapi->disp->get_supported(disp, &mask); + if (ret) + return ret; - for_each_set_bit(i, &mask, 32) { - ret = r535_outp_new(disp, i); - if (ret) - return ret; - } + for_each_set_bit(i, &mask, 32) { + ret = r535_outp_new(disp, i); + if (ret) + return ret; } ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event); @@ -1746,6 +1751,7 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device, const struct nvkm_rm_api_disp r535_disp = { .get_static_info = r535_disp_get_static_info, + .get_supported = r535_disp_get_supported, .bl_ctrl = r535_bl_ctrl, .dp = { .set_indexed_link_rates = r535_dp_set_indexed_link_rates, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index ce04ed9e3c27..3f0cb6790f00 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -80,6 +80,7 @@ struct nvkm_rm_api { const struct nvkm_rm_api_disp { int (*get_static_info)(struct nvkm_disp *); + int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask); int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); -- cgit v1.2.3 From bfbae411ed1a15aa6647351c2e1e1a3d418bdc84 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for disp.get_connect_state() 555.42.02 has incompatible changes to NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE. 
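One detail worth calling out in the new hook: r535_disp_get_connect_state() folds the old "push the control, then test displayMask" sequence into a single return convention, negative errno on failure, 0 for disconnected, 1 for connected, which is what lets r535_outp_detect() collapse to one call plus a "== 1" check. Here is a tiny standalone sketch of a caller under that convention; the fake connection map is invented just to exercise all three outcomes.

/* Sketch of the <0 / 0 / 1 convention used by disp->get_connect_state(). */
#include <stdio.h>
#include <errno.h>

/* invented hook: even ids are connected, id 7 errors out, the rest are absent */
static int get_connect_state_sketch(unsigned display_id)
{
	if (display_id == 7)
		return -EIO;
	return (display_id % 2) == 0 ? 1 : 0;
}

static void detect_sketch(unsigned display_id)
{
	int ret = get_connect_state_sketch(display_id);

	if (ret < 0)
		printf("display %2u: error %d\n", display_id, ret);
	else
		printf("display %2u: %s\n", display_id,
		       ret == 1 ? "connected" : "not connected");
}

int main(void)
{
	for (unsigned i = 0; i < 9; i++)
		detect_sketch(i);
	return 0;
}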
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 28 +++++++++++++--------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 40c50d9fca0b..4df5b2b72d29 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -852,10 +852,9 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp) } static int -r535_outp_detect(struct nvkm_outp *outp) +r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id) { NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; - struct nvkm_disp *disp = outp->disp; int ret; ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, @@ -864,23 +863,29 @@ r535_outp_detect(struct nvkm_outp *outp) return PTR_ERR(ctrl); ctrl->subDeviceInstance = 0; - ctrl->displayMask = BIT(outp->index); + ctrl->displayMask = BIT(display_id); ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return ret; - } + if (ret == 0 && (ctrl->displayMask & BIT(display_id))) + ret = 1; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} - if (ctrl->displayMask & BIT(outp->index)) { +static int +r535_outp_detect(struct nvkm_outp *outp) +{ + const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api; + int ret; + + ret = rmapi->disp->get_connect_state(outp->disp, outp->index); + if (ret == 1) { ret = r535_outp_dfp_get_info(outp); if (ret == 0) ret = 1; - } else { - ret = 0; } - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); return ret; } @@ -1752,6 +1757,7 @@ const struct nvkm_rm_api_disp r535_disp = { .get_static_info = r535_disp_get_static_info, .get_supported = r535_disp_get_supported, + .get_connect_state = r535_disp_get_connect_state, .bl_ctrl = r535_bl_ctrl, .dp = { .set_indexed_link_rates = r535_dp_set_indexed_link_rates, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 3f0cb6790f00..58c745554544 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -81,6 +81,7 @@ struct nvkm_rm_api { const struct nvkm_rm_api_disp { int (*get_static_info)(struct nvkm_disp *); int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask); + int (*get_connect_state)(struct nvkm_disp *, unsigned display_id); int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); -- cgit v1.2.3 From cf6b2b5e18d1b390cae2a05e4912c468e3de14fd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for disp.get_active() 555.42.02 has incompatible changes to NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE. 
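Same idea on the head side: get_active() takes a head index and hands back the active display ID through an out-parameter, so the caller in r535_outp_inherit() just walks disp->heads and queries each one. A short sketch of that walk follows; the head-to-display table and the "inactive" return are invented for the example, whereas the real hook reports failures via errno codes from the RM control call.

/* Sketch: resolve head -> active display id through an out-parameter hook. */
#include <stdio.h>

#define NR_HEADS 4

/* invented mapping: head i drives display 2*i, head 3 drives nothing */
static int get_active_sketch(unsigned head, unsigned *display_id)
{
	static const int active[NR_HEADS] = { 0, 2, 4, -1 };

	if (head >= NR_HEADS || active[head] < 0)
		return -1;
	*display_id = (unsigned)active[head];
	return 0;
}

int main(void)
{
	for (unsigned head = 0; head < NR_HEADS; head++) {
		unsigned id;

		if (get_active_sketch(head, &id) == 0)
			printf("head %u -> display %u\n", head, id);
		else
			printf("head %u -> inactive\n", head);
	}
	return 0;
}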
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 7 +++++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 4df5b2b72d29..97b7e54df61f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -724,7 +724,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda) } static int -r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid) +r535_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid) { NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; int ret; @@ -757,7 +757,9 @@ r535_outp_inherit(struct nvkm_outp *outp) int ret; list_for_each_entry(head, &disp->heads, head) { - ret = r535_disp_head_displayid(disp, head->id, &displayid); + const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api; + + ret = rmapi->disp->get_active(disp, head->id, &displayid); if (WARN_ON(ret)) return NULL; @@ -1758,6 +1760,7 @@ r535_disp = { .get_static_info = r535_disp_get_static_info, .get_supported = r535_disp_get_supported, .get_connect_state = r535_disp_get_connect_state, + .get_active = r535_disp_get_active, .bl_ctrl = r535_bl_ctrl, .dp = { .set_indexed_link_rates = r535_dp_set_indexed_link_rates, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 58c745554544..f25539401b20 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -82,6 +82,7 @@ struct nvkm_rm_api { int (*get_static_info)(struct nvkm_disp *); int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask); int (*get_connect_state)(struct nvkm_disp *, unsigned display_id); + int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id); int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); -- cgit v1.2.3 From 37a82fa33034d7d54ff049a162c732fbe4f94b93 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for disp.dp.get_caps() 555.42.02 has incompatible changes to NV0073_CTRL_CMD_DP_GET_CAPS. 
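The body of the new r535_dp_get_caps() is mostly a fixed translation: RM's MAX_LINK_RATE enum becomes the DPCD-style link-bandwidth code (1.62 Gbps -> 0x06, 2.70 -> 0x0a, 5.40 -> 0x14, 8.10 -> 0x1e, anything else -> 0x00), plus two booleans copied straight out of the reply. That mapping is easy to sanity-check on its own; in the sketch below the RATE_* enum values are local stand-ins, not the real NV0073 constants.

/* Standalone check of the max-link-rate -> link_bw translation in the patch.
 * RATE_* values are local stand-ins; the real NV0073 constants differ. */
#include <stdio.h>

enum { RATE_1_62, RATE_2_70, RATE_5_40, RATE_8_10, RATE_UNKNOWN };

static int link_bw_sketch(int max_link_rate)
{
	switch (max_link_rate) {
	case RATE_1_62: return 0x06;
	case RATE_2_70: return 0x0a;
	case RATE_5_40: return 0x14;
	case RATE_8_10: return 0x1e;
	default:        return 0x00;	/* the real caller WARNs and bails on 0 */
	}
}

int main(void)
{
	static const char * const name[] = { "1.62", "2.70", "5.40", "8.10", "????" };

	for (int r = RATE_1_62; r <= RATE_UNKNOWN; r++)
		printf("%s Gbps -> link_bw 0x%02x\n", name[r], link_bw_sketch(r));
	return 0;
}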
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 81 +++++++++++++--------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 48 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 97b7e54df61f..14187e1618b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -1159,6 +1159,49 @@ r535_dp = { .dp.drive = r535_dp_drive, }; +static int +r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm) +{ + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->sorIndex = ~0; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: + *plink_bw = 0x06; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: + *plink_bw = 0x0a; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: + *plink_bw = 0x14; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: + *plink_bw = 0x1e; + break; + default: + *plink_bw = 0x00; + break; + } + + *pmst = ctrl->bIsMultistreamSupported; + *pwm = ctrl->bHasIncreasedWatermarkLimits; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + static int r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize) { @@ -1203,6 +1246,7 @@ r535_tmds = { static int r535_outp_new(struct nvkm_disp *disp, u32 id) { + const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api; NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl; enum nvkm_ior_proto proto; struct dcb_output dcbE = {}; @@ -1287,43 +1331,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id) if (ret) return ret; } else { - NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; bool mst, wm; - ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, - NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); - if (IS_ERR(ctrl)) - return PTR_ERR(ctrl); - - ctrl->sorIndex = ~0; - - ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); - if (ret) { - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm); + if (ret) return ret; - } - - switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: - dcbE.dpconf.link_bw = 0x06; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: - dcbE.dpconf.link_bw = 0x0a; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: - dcbE.dpconf.link_bw = 0x14; - break; - case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: - dcbE.dpconf.link_bw = 0x1e; - break; - default: - dcbE.dpconf.link_bw = 0x00; - break; - } - - mst = ctrl->bIsMultistreamSupported; - wm = ctrl->bHasIncreasedWatermarkLimits; - nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); if (WARN_ON(!dcbE.dpconf.link_bw)) return -EINVAL; @@ -1763,6 +1775,7 @@ r535_disp = { .get_active = r535_disp_get_active, .bl_ctrl = r535_bl_ctrl, .dp = { + .get_caps = r535_dp_get_caps, .set_indexed_link_rates = 
r535_dp_set_indexed_link_rates, }, .chan = { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index f25539401b20..01cb97f1e494 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -87,6 +87,7 @@ struct nvkm_rm_api { int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val); struct { + int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm); int (*set_indexed_link_rates)(struct nvkm_outp *); } dp; -- cgit v1.2.3 From 27b13dc5d0515e3c9065f14d52a491248b12a291 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 20 Nov 2024 12:59:53 +1000 Subject: drm/nouveau/gsp: add hal for fifo.chan.alloc 570.86.16 has incompatible changes to NV_CHANNEL_ALLOC_PARAMS. At the same time, remove the duplicated channel allocation code from golden context init. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 103 ++++++++++++--------- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 70 ++------------ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 7 ++ 3 files changed, 76 insertions(+), 104 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 58a47c62690a..645706179913 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -70,50 +70,29 @@ r535_chan_ramfc_clear(struct nvkm_chan *chan) #define CHID_PER_USERD 8 static int -r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) +r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq, + bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr, + struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length, + struct nvkm_gsp_object *chan) { - struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; - struct nvkm_engn *engn; - struct nvkm_device *device = fifo->engine.subdev.device; + struct nvkm_gsp *gsp = device->object.client->gsp; + struct nvkm_fifo *fifo = gsp->subdev.device->fifo; + const int userd_p = chid / CHID_PER_USERD; + const int userd_i = chid % CHID_PER_USERD; NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; - const int userd_p = chan->id / CHID_PER_USERD; - const int userd_i = chan->id % CHID_PER_USERD; - u32 eT = ~0; - int ret; - if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { - ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); - if (ret) - return ret; - } - - nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { - eT = engn->id; - break; - } - - if (WARN_ON(eT == ~0)) - return -EINVAL; - - chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, - fifo->rm.mthdbuf_size, - &chan->rm.mthdbuf.addr, GFP_KERNEL); - if (!chan->rm.mthdbuf.ptr) - return -ENOMEM; - - args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, NVKM_RM_CHAN(chan->id), - fifo->func->chan.user.oclass, sizeof(*args), - &chan->rm.object); + args = nvkm_gsp_rm_alloc_get(&device->object, handle, + fifo->func->chan.user.oclass, sizeof(*args), chan); if (WARN_ON(IS_ERR(args))) return PTR_ERR(args); - args->gpFifoOffset = offset; - args->gpFifoEntries = length / 8; + args->gpFifoOffset = gpfifo_offset; + args->gpFifoEntries = gpfifo_length / 8; args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); args->flags |= 
NVDEF(NVOS04, FLAGS, VPR, FALSE); args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE); - args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq); + args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq); if (!priv) args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); else @@ -136,25 +115,25 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); - args->hVASpace = chan->vmm->rm.object.handle; - args->engineType = eT; + args->hVASpace = vmm->rm.object.handle; + args->engineType = nv2080_engine_type; - args->instanceMem.base = chan->inst->addr; - args->instanceMem.size = chan->inst->size; + args->instanceMem.base = inst_addr; + args->instanceMem.size = fifo->func->chan.func->inst->size; args->instanceMem.addressSpace = 2; args->instanceMem.cacheAttrib = 1; - args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base; + args->userdMem.base = userd_addr; args->userdMem.size = fifo->func->chan.func->userd->size; args->userdMem.addressSpace = 2; args->userdMem.cacheAttrib = 1; - args->ramfcMem.base = chan->inst->addr + 0; + args->ramfcMem.base = inst_addr; args->ramfcMem.size = 0x200; args->ramfcMem.addressSpace = 2; args->ramfcMem.cacheAttrib = 1; - args->mthdbufMem.base = chan->rm.mthdbuf.addr; + args->mthdbufMem.base = mthdbuf_addr; args->mthdbufMem.size = fifo->rm.mthdbuf_size; args->mthdbufMem.addressSpace = 1; args->mthdbufMem.cacheAttrib = 0; @@ -166,7 +145,44 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); - ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); + return nvkm_gsp_rm_alloc_wr(chan, args); +} + +static int +r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv) +{ + struct nvkm_fifo *fifo = chan->cgrp->runl->fifo; + struct nvkm_engn *engn; + struct nvkm_device *device = fifo->engine.subdev.device; + const struct nvkm_rm_api *rmapi = device->gsp->rm->api; + u32 eT = ~0; + int ret; + + if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) { + ret = nvkm_subdev_oneinit(&device->gr->engine.subdev); + if (ret) + return ret; + } + + nvkm_runl_foreach_engn(engn, chan->cgrp->runl) { + eT = engn->id; + break; + } + + if (WARN_ON(eT == ~0)) + return -EINVAL; + + chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev, + fifo->rm.mthdbuf_size, + &chan->rm.mthdbuf.addr, GFP_KERNEL); + if (!chan->rm.mthdbuf.ptr) + return -ENOMEM; + + ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id), + eT, chan->runq, priv, chan->id, chan->inst->addr, + nvkm_memory_addr(chan->userd.mem) + chan->userd.base, + chan->rm.mthdbuf.addr, chan->vmm, offset, length, + &chan->rm.object); if (ret) return ret; @@ -541,4 +557,7 @@ const struct nvkm_rm_api_fifo r535_fifo = { .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type, .ectx_size = r535_fifo_ectx_size, + .chan = { + .alloc = r535_chan_alloc, + }, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index e0fa88aa608f..cc28de66cfa3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c 
@@ -298,74 +298,20 @@ r535_gr_oneinit(struct nvkm_gr *base) if (ret) goto done; - { - NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; - - args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, NVKM_RM_CHAN(0), - device->fifo->func->chan.user.oclass, - sizeof(*args), &golden.chan); - if (IS_ERR(args)) { - ret = PTR_ERR(args); - goto done; - } - - args->gpFifoOffset = 0; - args->gpFifoEntries = 0x1000 / 8; - args->flags = - NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) | - NVDEF(NVOS04, FLAGS, VPR, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) | - NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) | - NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) | - NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) | - NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) | - NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) | - NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) | - NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) | - NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) | - NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) | - NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) | - NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) | - NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) | - NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); - args->hVASpace = golden.vmm->rm.object.handle; - args->engineType = 1; - args->instanceMem.base = nvkm_memory_addr(golden.inst); - args->instanceMem.size = 0x1000; - args->instanceMem.addressSpace = 2; - args->instanceMem.cacheAttrib = 1; - args->ramfcMem.base = nvkm_memory_addr(golden.inst); - args->ramfcMem.size = 0x200; - args->ramfcMem.addressSpace = 2; - args->ramfcMem.cacheAttrib = 1; - args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000; - args->userdMem.size = 0x200; - args->userdMem.addressSpace = 2; - args->userdMem.cacheAttrib = 1; - args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000; - args->mthdbufMem.size = 0x5000; - args->mthdbufMem.addressSpace = 2; - args->mthdbufMem.cacheAttrib = 1; - args->internalFlags = - NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) | - NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) | - NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); - - ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args); - if (ret) - goto done; - } + ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0), 1, 0, true, 0, + nvkm_memory_addr(golden.inst), + nvkm_memory_addr(golden.inst) + 0x1000, + nvkm_memory_addr(golden.inst) + 0x2000, + golden.vmm, 0, 0x1000, &golden.chan); + if (ret) + goto done; /* Fetch context buffer info from RM and allocate each of them here to use * during golden context init (or later as a global context buffer). * * Also build the information that'll be used to create channel contexts. 
*/ - ret = gsp->rm->api->gr->get_ctxbufs_info(gr); + ret = rm->api->gr->get_ctxbufs_info(gr); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 01cb97f1e494..29663dbe99e1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -101,6 +101,13 @@ struct nvkm_rm_api { int (*xlat_rm_engine_type)(u32 rm_engine_type, enum nvkm_subdev_type *, int *nv2080_type); int (*ectx_size)(struct nvkm_fifo *); + struct { + int (*alloc)(struct nvkm_gsp_device *, u32 handle, + u32 nv2080_engine_type, u8 runq, bool priv, int chid, + u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr, + struct nvkm_vmm *, u64 gpfifo_offset, u32 gpfifo_length, + struct nvkm_gsp_object *); + } chan; } *fifo; const struct nvkm_rm_api_engine { -- cgit v1.2.3 From 3194beda36231a1faae94e6fdf900bad265df1bf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:38 +1000 Subject: drm/nouveau/gsp: add hal for fifo.rsvd_chids 555.42.02 reserves some CHIDs for internal use. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 7 ++++--- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 3 ++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 645706179913..a480c1a5686d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -427,13 +427,14 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) struct nvkm_rm *rm = gsp->rm; struct nvkm_runl *runl; struct nvkm_engn *engn; - u32 cgids = 2048; u32 chids = 2048; + u32 first = rm->api->fifo->rsvd_chids; + u32 count = chids - first; int ret; NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; - if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) || - (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid))) + if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) || + (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid))) return ret; ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index cc28de66cfa3..9e97b7b1a0fb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -298,7 +298,8 @@ r535_gr_oneinit(struct nvkm_gr *base) if (ret) goto done; - ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0), 1, 0, true, 0, + ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0), + 1, 0, true, rm->api->fifo->rsvd_chids, nvkm_memory_addr(golden.inst), nvkm_memory_addr(golden.inst) + 0x1000, nvkm_memory_addr(golden.inst) + 0x2000, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 29663dbe99e1..a370beda3c71 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -101,6 +101,7 @@ struct nvkm_rm_api { int (*xlat_rm_engine_type)(u32 rm_engine_type, enum nvkm_subdev_type *, int *nv2080_type); int 
(*ectx_size)(struct nvkm_fifo *); + unsigned rsvd_chids; struct { int (*alloc)(struct nvkm_gsp_device *, u32 handle, u32 nv2080_engine_type, u8 runq, bool priv, int chid, -- cgit v1.2.3 From 8887abb8cb6fb4647fb8b1f023b612c201512150 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:39 +1000 Subject: drm/nouveau/gsp: add hal for fifo.rc_triggered() 565.57.01 has incompatible changes to rpc_rc_triggered_v17_02. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 34 ++++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 30 +------------------ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index a480c1a5686d..1561b5a9b6fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -339,6 +339,39 @@ r535_runl = { .allow = r535_runl_allow, }; +static void +r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid) +{ + struct nvkm_chan *chan; + unsigned long flags; + + chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags); + if (!chan) { + nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid); + return; + } + + nvkm_chan_error(chan, false); + nvkm_chan_put(&chan, flags); +} + +static int +r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) +{ + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n", + msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, + msg->partitionAttributionId); + + r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid); + return 0; +} + static int r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080) { @@ -558,6 +591,7 @@ const struct nvkm_rm_api_fifo r535_fifo = { .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type, .ectx_size = r535_fifo_ectx_size, + .rc_triggered = r535_fifo_rc_triggered, .chan = { .alloc = r535_chan_alloc, }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 64e9ecf93441..b7c2a785bc58 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -926,33 +926,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) return 0; } -static int -r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) -{ - rpc_rc_triggered_v17_02 *msg = repv; - struct nvkm_gsp *gsp = priv; - struct nvkm_subdev *subdev = &gsp->subdev; - struct nvkm_chan *chan; - unsigned long flags; - - if (WARN_ON(repc < sizeof(*msg))) - return -EINVAL; - - nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", - msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, - msg->partitionAttributionId); - - chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid, &flags); - if (!chan) { - nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); - return 0; - } - - nvkm_chan_error(chan, false); - nvkm_chan_put(&chan, flags); - return 0; -} - static int r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) { @@ -2154,8 +2127,7 @@ r535_gsp_oneinit(struct 
nvkm_gsp *gsp) r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, r535_gsp_msg_run_cpu_sequencer, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); - r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, - r535_gsp_msg_rc_triggered, gsp); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, r535_gsp_msg_mmu_fault_queued, gsp); r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index a370beda3c71..1ca5b025eeb4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -102,6 +102,7 @@ struct nvkm_rm_api { enum nvkm_subdev_type *, int *nv2080_type); int (*ectx_size)(struct nvkm_fifo *); unsigned rsvd_chids; + int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc); struct { int (*alloc)(struct nvkm_gsp_device *, u32 handle, u32 nv2080_engine_type, u8 runq, bool priv, int chid, -- cgit v1.2.3 From f82fb646e12e3f64c4576d8f537b343f16966be9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:39 +1000 Subject: drm/nouveau/gsp: add hal for disp.chan.dmac_alloc() 565.57.01 has incompatible changes to NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c | 31 ++++++++++++++-------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 2 ++ 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c index 14187e1618b8..7e9e2d3564da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c @@ -164,27 +164,35 @@ r535_dmac_fini(struct nvkm_disp_chan *chan) r535_chan_fini(chan); } +static int +r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset, + struct nvkm_gsp_object *dmac) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass, + sizeof(*args), dmac); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = inst; + args->offset = put_offset; + + return nvkm_gsp_rm_alloc_wr(dmac, args); +} + static int r535_dmac_init(struct nvkm_disp_chan *chan) { const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api; - NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; int ret; ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory); if (ret) return ret; - args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object, - (chan->object.oclass << 16) | chan->head, - chan->object.oclass, sizeof(*args), &chan->rm.object); - if (IS_ERR(args)) - return PTR_ERR(args); - - args->channelInstance = chan->head; - args->offset = chan->suspend_put; - - return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args); + return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head, + chan->suspend_put, &chan->rm.object); } static int @@ -1780,5 +1788,6 @@ r535_disp = { }, .chan = { .set_pushbuf = r535_disp_chan_set_pushbuf, + .dmac_alloc = r535_dmac_alloc, } }; diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 1ca5b025eeb4..7aed7cd72e85 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -94,6 +94,8 @@ struct nvkm_rm_api { struct { int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst, struct nvkm_memory *); + int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset, + struct nvkm_gsp_object *); } chan; } *disp; -- cgit v1.2.3 From 9c86a6010ae5bbc67770d649829e8f3dc302d2b9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 1 Apr 2025 05:22:41 +1000 Subject: drm/nouveau/gsp: add hal for gsp.sr_data_size() 570.86.15 uses a slightly different calculation for the size of the sysmem buffer needed to store GSP-RM's vidmem data across suspend. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 12 ++++++++++-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index b7c2a785bc58..85eb838d2a09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -1700,6 +1700,14 @@ lvl1_fail: return ret; } +static u32 +r535_gsp_sr_data_size(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta = gsp->wpr_meta.data; + + return meta->gspFwWprEnd - meta->gspFwWprStart; +} + int r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) { @@ -1707,8 +1715,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) int ret; if (suspend) { - GspFwWprMeta *meta = gsp->wpr_meta.data; - u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; + u32 len = rm->api->gsp->sr_data_size(gsp); GspFwSRMeta *sr; ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); @@ -2167,4 +2174,5 @@ r535_gsp = { .get_static_info = r535_gsp_get_static_info, .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx, .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data, + .sr_data_size = r535_gsp_sr_data_size, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 7aed7cd72e85..eb018b73d26f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -34,6 +34,7 @@ struct nvkm_rm_api { int (*get_static_info)(struct nvkm_gsp *); bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst); void (*drop_send_user_shared_data)(struct nvkm_gsp *); + u32 (*sr_data_size)(struct nvkm_gsp *); } *gsp; const struct nvkm_rm_api_rpc { -- cgit v1.2.3 From 1b9d7b9df8f3e33a577bf00861250d3669bf2000 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 14 May 2025 09:19:56 +1000 Subject: drm/nouveau/gsp: add common client alloc code 570.144 has incompatible changes to NV0000_ALLOC_PARAMETERS. Factor out the common code so it can be shared. 
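The shape of the factored-out code: the common nvkm_gsp_client_ctor() owns the client-ID allocation in the idr and all of the error unwinding, while the per-version hook is reduced to issuing the RM_ALLOC with whatever NV0000_ALLOC_PARAMETERS layout its firmware expects. A compact userspace sketch of that ownership split follows; a plain counter stands in for the idr and the my_* names are hypothetical, though the 0xc1d00000 handle base matches the NVKM_RM_CLIENT() scheme in the patch.

/* Sketch: common ctor owns id allocation and cleanup, hook only does the alloc. */
#include <stdio.h>

#define CLIENT_BASE 0xc1d00000u		/* same base as NVKM_RM_CLIENT(id) */

struct my_client { unsigned handle; int has_id; };

struct my_client_api {
	int (*ctor)(struct my_client *, unsigned handle);	/* per-version */
};

static int r535_client_ctor_sketch(struct my_client *client, unsigned handle)
{
	client->handle = handle;	/* real code: RM_ALLOC of NV01_ROOT */
	return 0;
}

static unsigned next_id;		/* stands in for gsp->client_id.idr */

static void client_dtor_sketch(struct my_client *client)
{
	if (client->has_id) {		/* real code: rm_free + idr_remove */
		client->has_id = 0;
		client->handle = 0;
	}
}

static int client_ctor_sketch(const struct my_client_api *api, struct my_client *client)
{
	int ret;

	client->has_id = 1;		/* real code: idr_alloc under the mutex */
	ret = api->ctor(client, CLIENT_BASE | next_id++);
	if (ret)
		client_dtor_sketch(client);	/* common cleanup on any failure */
	return ret;
}

int main(void)
{
	static const struct my_client_api r535_sketch = { .ctor = r535_client_ctor_sketch };
	struct my_client client = { 0 };

	if (client_ctor_sketch(&r535_sketch, &client) == 0)
		printf("client handle 0x%08x\n", client.handle);
	client_dtor_sketch(&client);
	return 0;
}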
Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 17 +------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 1 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c | 49 ++++++++++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h | 1 + .../drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c | 44 +++---------------- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 3 +- 6 files changed, 59 insertions(+), 56 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 40e1b5300dff..4ad07f3ced69 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -411,21 +411,8 @@ nvkm_gsp_rm_free(struct nvkm_gsp_object *object) return 0; } -static inline int -nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) -{ - if (WARN_ON(!gsp->rm)) - return -ENOSYS; - - return gsp->rm->api->client->ctor(gsp, client); -} - -static inline void -nvkm_gsp_client_dtor(struct nvkm_gsp_client *client) -{ - if (client->gsp) - client->gsp->rm->api->client->dtor(client); -} +int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *); +void nvkm_gsp_client_dtor(struct nvkm_gsp_client *); static inline int nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index e5d5f8880d31..0eac850d1f33 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -1,6 +1,7 @@ # SPDX-License-Identifier: MIT # # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. +nvkm-y += nvkm/subdev/gsp/rm/client.o nvkm-y += nvkm/subdev/gsp/rm/engine.o nvkm-y += nvkm/subdev/gsp/rm/gr.o nvkm-y += nvkm/subdev/gsp/rm/nvdec.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c new file mode 100644 index 000000000000..72d3e3ca84c2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "rm.h" + +void +nvkm_gsp_client_dtor(struct nvkm_gsp_client *client) +{ + const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0); + struct nvkm_gsp *gsp = client->gsp; + + if (!gsp) + return; + + if (client->object.client) + nvkm_gsp_rm_free(&client->object); + + mutex_lock(&gsp->client_id.mutex); + idr_remove(&gsp->client_id.idr, id); + mutex_unlock(&gsp->client_id.mutex); + + client->gsp = NULL; +} + +int +nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) +{ + int id, ret; + + if (WARN_ON(!gsp->rm)) + return -ENOSYS; + + mutex_lock(&gsp->client_id.mutex); + id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL); + mutex_unlock(&gsp->client_id.mutex); + if (id < 0) + return id; + + client->gsp = gsp; + client->object.client = client; + INIT_LIST_HEAD(&client->events); + + ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id)); + if (ret) + nvkm_gsp_client_dtor(client); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h index 50f2f2a86b5a..3bdb5ad320d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h @@ -8,6 +8,7 @@ /* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */ #define NVKM_RM_CLIENT(id) (0xc1d00000 | (id)) +#define NVKM_RM_CLIENT_MASK 0x0000ffff #define NVKM_RM_DEVICE 0xde1d0000 #define NVKM_RM_SUBDEVICE 0x5d1d0000 #define NVKM_RM_DISP 0x00730000 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c index 449338da1795..ec71f683e609 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c @@ -23,57 +23,23 @@ #include "nvrm/client.h" -static void -r535_gsp_client_dtor(struct nvkm_gsp_client *client) -{ - struct nvkm_gsp *gsp = client->gsp; - - nvkm_gsp_rm_free(&client->object); - - mutex_lock(&gsp->client_id.mutex); - idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); - mutex_unlock(&gsp->client_id.mutex); - - client->gsp = NULL; -} - static int -r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client) +r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle) { NV0000_ALLOC_PARAMETERS *args; - int ret; - - mutex_lock(&gsp->client_id.mutex); - ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL); - mutex_unlock(&gsp->client_id.mutex); - if (ret < 0) - return ret; - client->gsp = gsp; - client->object.client = client; - INIT_LIST_HEAD(&client->events); - - args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_CLIENT(ret), NV01_ROOT, sizeof(*args), + args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args), &client->object); - if (IS_ERR(args)) { - r535_gsp_client_dtor(client); - return ret; - } + if (IS_ERR(args)) + return PTR_ERR(args); args->hClient = client->object.handle; args->processID = ~0; - ret = nvkm_gsp_rm_alloc_wr(&client->object, args); - if (ret) { - r535_gsp_client_dtor(client); - return ret; - } - - return 0; + return nvkm_gsp_rm_alloc_wr(&client->object, args); } const struct nvkm_rm_api_client r535_client = { .ctor = r535_gsp_client_ctor, - .dtor = r535_gsp_client_dtor, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index eb018b73d26f..5e9d7351ecc4 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -59,8 +59,7 @@ struct nvkm_rm_api { } *alloc; const struct nvkm_rm_api_client { - int (*ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *); - void (*dtor)(struct nvkm_gsp_client *); + int (*ctor)(struct nvkm_gsp_client *, u32 handle); } *client; const struct nvkm_rm_api_device { -- cgit v1.2.3 From 53dac0623853457ec7564152528b686b8d5dfabe Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Nov 2024 13:02:37 +1000 Subject: drm/nouveau/gsp: add support for 570.144 Add r570-specific HAL routines, and support loading of GSP-RM version 570.144 if firmware is available. There should be no impact on r535, or non-GSP paths. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c | 7 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c | 7 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c | 31 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h | 17 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c | 8 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c | 16 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 2 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 5 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 10 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 12 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild | 9 + .../drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c | 28 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c | 263 ++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c | 149 ++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c | 217 ++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c | 191 +++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c | 206 ++++++++ .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h | 21 + .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h | 355 +++++++++++++ .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h | 318 ++++++++++++ .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h | 19 + .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h | 213 ++++++++ .../drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h | 79 +++ .../drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h | 576 +++++++++++++++++++++ .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h | 57 ++ .../drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h | 17 + .../nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h | 249 +++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c | 28 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c | 52 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 36 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c | 4 + 35 files changed, 3193 insertions(+), 19 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c create mode 100644 
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index 8ab02d683c90..eb765da0876e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -41,6 +41,7 @@ ad102_gsp = { static struct nvkm_gsp_fwif ad102_gsps[] = { + { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true }, { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true }, {} }; @@ -57,3 +58,9 @@ NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index be6bbf06d58b..3a452349afde 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -142,6 +142,8 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, gsp->func = fwif->func; if (fwif->rm) { + nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver); + gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL); if (!gsp->rm) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c index a6836a85b2ac..d201e8697226 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c @@ -58,6 +58,7 @@ ga100_gsp = { static struct nvkm_gsp_fwif ga100_gsps[] = { + { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" }, { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} @@ -71,3 +72,4 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, } NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01); +NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c index 202b5bdc3980..917f7e2f6c46 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c @@ -174,6 +174,7 @@ ga102_gsp = { static struct nvkm_gsp_fwif ga102_gsps[] = { + { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" }, { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" }, { -1, gv100_gsp_nofw, &ga102_gsp }, {} 
@@ -191,3 +192,9 @@ NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index 0eac850d1f33..d3f4e60bb131 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o nvkm-y += nvkm/subdev/gsp/rm/ad10x.o include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild +include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c index 22aa894da79d..f40b8fcc2bcb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c @@ -16,6 +16,34 @@ nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc, return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject); } +static int +nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend) +{ + struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm; + struct r535_gr *gr = container_of(base, typeof(*gr), base); + + if (rm->api->gr->scrubber.fini) + rm->api->gr->scrubber.fini(gr); + + return 0; +} + +static int +nvkm_rm_gr_init(struct nvkm_gr *base) +{ + struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm; + struct r535_gr *gr = container_of(base, typeof(*gr), base); + int ret; + + if (rm->api->gr->scrubber.init) { + ret = rm->api->gr->scrubber.init(gr); + if (ret) + return ret; + } + + return 0; +} + int nvkm_rm_gr_new(struct nvkm_rm *rm) { @@ -34,6 +62,8 @@ nvkm_rm_gr_new(struct nvkm_rm *rm) func->dtor = r535_gr_dtor; func->oneinit = r535_gr_oneinit; + func->init = nvkm_rm_gr_init; + func->fini = nvkm_rm_gr_fini; func->units = r535_gr_units; func->chan_new = r535_gr_chan_new; @@ -51,6 +81,7 @@ nvkm_rm_gr_new(struct nvkm_rm *rm) } nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base); + gr->scrubber.chid = -1; rm->device->gr = &gr->base; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h index 9f2b31651019..24980f23aab9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h @@ -34,5 +34,22 @@ struct r535_gr { int ctxbuf_nr; struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS]; + + struct { + int chid; + struct nvkm_memory *inst; + struct nvkm_vmm *vmm; + struct nvkm_gsp_object chan; + struct nvkm_gsp_object threed; + struct { + struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS]; + struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS]; + } ctxbuf; + bool enabled; + } scrubber; }; + +struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; +void r535_gr_get_ctxbuf_info(struct r535_gr *, int i, + struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c index f72e2a7ac6bc..b8fb8150ae48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c @@ -33,6 +33,14 @@ static void r535_bar_flush(struct 
nvkm_bar *bar) { + /* Use NV_PFLUSH in resume path - needed on R570 to flush writes before + * BAR2 page tables have been restored. + */ + if (unlikely(!bar->bar2)) { + g84_bar_flush(bar); + return; + } + ioread32_native(bar->flushBAR2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c index 0e436c4fb4e0..1976d0030d17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -48,9 +48,9 @@ struct fbsr { u64 sys_offset; }; -static int -fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, - u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) +int +r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper, + u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object) { struct nvkm_gsp_client *client = device->object.client; struct nvkm_gsp *gsp = client->gsp; @@ -117,8 +117,8 @@ fbsr_send(struct fbsr *fbsr, struct fbsr_item *item) struct nvkm_gsp_object memlist; int ret; - ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, - item->addr, item->size, NULL, &memlist); + ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM, + item->addr, item->size, NULL, &memlist); if (ret) return ret; @@ -155,8 +155,8 @@ fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size) struct nvkm_gsp_object memlist; int ret; - ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, - 0, fbsr->size, sgt, &memlist); + ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST, + 0, fbsr->size, sgt, &memlist); if (ret) return ret; @@ -200,7 +200,7 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory) return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory)); } -static void +void r535_fbsr_resume(struct nvkm_gsp *gsp) { /* RM has restored VRAM contents already, so just need to free the sysmem buffer. 
*/ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 1561b5a9b6fc..4238362ec073 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -339,7 +339,7 @@ r535_runl = { .allow = r535_runl_allow, }; -static void +void r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid) { struct nvkm_chan *chan; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index 9e97b7b1a0fb..1f5cf21f3f61 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -32,6 +32,7 @@ #include #include "nvrm/gr.h" +#include "nvrm/vmm.h" #define r535_gr(p) container_of((p), struct r535_gr, base) @@ -55,7 +56,7 @@ r535_gr_chan = { .dtor = r535_gr_chan_dtor, }; -static int +int r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm, struct nvkm_memory **pmem, struct nvkm_vma **pvma, struct nvkm_gsp_object *chan) @@ -170,7 +171,7 @@ r535_gr_units(struct nvkm_gr *gr) return (gsp->gr.tpcs << 8) | gsp->gr.gpcs; } -static void +void r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i, struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 85eb838d2a09..ce3d4dd49ac8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -173,7 +173,7 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp) return ret; } -static void +void r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp, const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info) { @@ -708,7 +708,7 @@ fail: } #if defined(CONFIG_ACPI) && defined(CONFIG_X86) -static void +void r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) { const guid_t NVOP_DSM_GUID = @@ -742,7 +742,7 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) kfree(argv4.buffer.pointer); } -static void +void r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) { const guid_t JT_DSM_GUID = @@ -834,7 +834,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux } } -static void +void r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) { acpi_status status; @@ -2142,6 +2142,8 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp) r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); if (rmapi->gsp->drop_send_user_shared_data) rmapi->gsp->drop_send_user_shared_data(gsp); + if (rmapi->gsp->drop_post_nocat_record) + rmapi->gsp->drop_post_nocat_record(gsp); ret = r535_gsp_rm_boot_ctor(gsp); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c index 99af6c19a9a9..05690f745bb4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -23,8 +23,8 @@ #include "nvrm/vmm.h" -static int -r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +int +r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle) { NV_VASPACE_ALLOCATION_PARAMETERS *args; int ret; @@ -34,7 +34,7 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm) if (ret) return ret; - args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, NVKM_RM_VASPACE, FERMI_VASPACE_A, + args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A, sizeof(*args), 
&vmm->rm.object); if (IS_ERR(args)) return PTR_ERR(args); @@ -86,6 +86,12 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm) return ret; } +static int +r535_mmu_promote_vmm(struct nvkm_vmm *vmm) +{ + return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE); +} + static void r535_mmu_dtor(struct nvkm_mmu *mmu) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild new file mode 100644 index 000000000000..5db0e7009e1f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o +nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o +nvkm-y += nvkm/subdev/gsp/rm/r570/client.o +nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o +nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o +nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o +nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o +nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c new file mode 100644 index 000000000000..87e6240662ed --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include + +#include "nvrm/client.h" + +static int +r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle) +{ + NV0000_ALLOC_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args), + &client->object); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->hClient = client->object.handle; + args->processID = ~0; + + return nvkm_gsp_rm_alloc_wr(&client->object, args); +} + +const struct nvkm_rm_api_client +r570_client = { + .ctor = r570_gsp_client_ctor, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c new file mode 100644 index 000000000000..a96e31c2d80b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include + +#include +#include + +#include "nvhw/drf.h" + +#include "nvrm/disp.h" + +static int +r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset, + struct nvkm_gsp_object *dmac) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass, + sizeof(*args), dmac); + if (IS_ERR(args)) + return PTR_ERR(args); + + args->channelInstance = inst; + args->offset = put_offset; + args->subDeviceId = BIT(0); + + return nvkm_gsp_rm_alloc_wr(dmac, args); +} + +static int +r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory) +{ + struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp; + NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + if (memory) { + switch (nvkm_memory_target(memory)) { + case NVKM_MEM_TARGET_NCOH: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 0; + ctrl->pbTargetAperture = PHYS_PCI; + break; + case NVKM_MEM_TARGET_HOST: + ctrl->addressSpace = ADDR_SYSMEM; + ctrl->cacheSnoop = 1; + ctrl->pbTargetAperture = PHYS_PCI_COHERENT; + break; + case NVKM_MEM_TARGET_VRAM: + ctrl->addressSpace = ADDR_FBMEM; + ctrl->pbTargetAperture = PHYS_NVM; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + ctrl->physicalAddr = nvkm_memory_addr(memory); + ctrl->limit = nvkm_memory_size(memory) - 1; + } + + ctrl->hclass = oclass; + ctrl->channelInstance = inst; + ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0; + ctrl->subDeviceId = BIT(0); + + return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +} + +static int +r570_dp_set_indexed_link_rates(struct nvkm_outp *outp) +{ + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl; + struct nvkm_disp *disp = outp->disp; + + if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl))) + return -EINVAL; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(outp->index); + for (int i = 0; i < outp->dp.rates; i++) + ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200; + + return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl); +} + +static int +r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm) +{ + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->sorIndex = ~0; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) { + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: + *plink_bw = 0x06; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: + *plink_bw = 0x0a; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: + *plink_bw = 0x14; + break; + case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: + *plink_bw = 0x1e; + break; + default: + *plink_bw = 0x00; + break; + } + + *pmst = ctrl->bIsMultistreamSupported; + *pwm = ctrl->bHasIncreasedWatermarkLimits; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r570_bl_ctrl(struct 
nvkm_disp *disp, unsigned display_id, bool set, int *pval) +{ + u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS : + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS; + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->displayId = BIT(display_id); + ctrl->brightness = *pval; + ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) + return ret; + + *pval = ctrl->brightness; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid) +{ + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->head = head; + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret) { + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; + } + + *displayid = ctrl->displayId; + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} +static int +r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id) +{ + NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->subDeviceInstance = 0; + ctrl->displayMask = BIT(display_id); + + ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); + if (ret == 0 && (ctrl->displayMask & BIT(display_id))) + ret = 1; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return ret; +} + +static int +r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask) +{ + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + *pmask = ctrl->displayMask; + + nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); + return 0; +} + +static int +r570_disp_get_static_info(struct nvkm_disp *disp) +{ + NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + disp->wndw.mask = ctrl->windowPresentMask; + disp->wndw.nr = fls(disp->wndw.mask); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +const struct nvkm_rm_api_disp +r570_disp = { + .get_static_info = r570_disp_get_static_info, + .get_supported = r570_disp_get_supported, + .get_connect_state = r570_disp_get_connect_state, + .get_active = r570_disp_get_active, + .bl_ctrl = r570_bl_ctrl, + .dp = { + .get_caps = r570_dp_get_caps, + .set_indexed_link_rates = r570_dp_set_indexed_link_rates, + }, + .chan = { + .set_pushbuf = r570_disp_chan_set_pushbuf, + .dmac_alloc = r570_dmac_alloc, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c new file mode 100644 index 000000000000..2945d5b4e570 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c @@ -0,0 +1,149 
@@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include +#include +#include +#include + +#include "nvrm/fbsr.h" +#include "nvrm/fifo.h" + +static int +r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend) +{ + NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->bDisableActiveChannels = suspend; + + return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); +} + +static void +r570_fbsr_resume(struct nvkm_gsp *gsp) +{ + struct nvkm_device *device = gsp->subdev.device; + struct nvkm_instmem *imem = device->imem; + struct nvkm_instobj *iobj; + struct nvkm_vmm *vmm; + + /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */ + list_for_each_entry(iobj, &imem->boot, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + device->bar->bar2 = true; + + vmm = nvkm_bar_bar2_vmm(device); + vmm->func->flush(vmm, 0); + + /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->suspend) + nvkm_instobj_load(iobj); + } + + vmm = nvkm_bar_bar1_vmm(device); + vmm->func->flush(vmm, 0); + + /* Resume channel scheduling. */ + r570_fbsr_suspend_channels(device->gsp, false); + + /* Finish cleaning up. */ + r535_fbsr_resume(gsp); +} + +static int +r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size) +{ + NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; + struct nvkm_gsp_object memlist; + int ret; + + ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST, + 0, size, sgt, &memlist); + if (ret) + return ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->hClient = gsp->internal.client.object.handle; + ctrl->hSysMem = memlist.handle; + ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr; + ctrl->bEnteringGcoffState = 1; + + ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); + if (ret) + return ret; + + nvkm_gsp_rm_free(&memlist); + return 0; +} + +static int +r570_fbsr_suspend(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_instmem *imem = device->imem; + struct nvkm_instobj *iobj; + u64 size; + int ret; + + /* Stop channel scheduling. */ + r570_fbsr_suspend_channels(gsp, true); + + /* Save BAR2 allocations to system memory. */ + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->preserve) { + ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + } + + list_for_each_entry(iobj, &imem->boot, head) { + ret = nvkm_instobj_save(iobj); + if (ret) + return ret; + } + + /* Disable BAR2 access. */ + device->bar->bar2 = false; + + /* Allocate system memory to hold RM's VRAM allocations across suspend. */ + size = gsp->fb.heap.size; + size += gsp->fb.rsvd_size; + size += gsp->fb.bios.vga_workspace.size; + nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size); + + ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr); + if (ret) + return ret; + + /* Initialise FBSR on RM. 
*/ + ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size); + if (ret) { + nvkm_gsp_sg_free(device, &gsp->sr.fbsr); + return ret; + } + + return 0; +} + +const struct nvkm_rm_api_fbsr +r570_fbsr = { + .suspend = r570_fbsr_suspend, + .resume = r570_fbsr_resume, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c new file mode 100644 index 000000000000..79132805cfcf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include + +#include +#include +#include +#include + +#include "nvhw/drf.h" + +#include "nvrm/fifo.h" +#include "nvrm/engine.h" + +#define CHID_PER_USERD 8 + +static int +r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq, + bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr, + struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length, + struct nvkm_gsp_object *chan) +{ + struct nvkm_gsp *gsp = device->object.client->gsp; + struct nvkm_fifo *fifo = gsp->subdev.device->fifo; + const int userd_p = chid / CHID_PER_USERD; + const int userd_i = chid % CHID_PER_USERD; + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(&device->object, handle, + fifo->func->chan.user.oclass, sizeof(*args), chan); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->gpFifoOffset = gpfifo_offset; + args->gpFifoEntries = gpfifo_length / 8; + + args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL); + args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq); + if (!priv) + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE); + else + args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE); + args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE); + + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE); + args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE); + + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT); + args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE); + args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE); + + args->hVASpace = vmm->rm.object.handle; + args->engineType = nv2080_engine_type; + + args->instanceMem.base = inst_addr; + args->instanceMem.size = fifo->func->chan.func->inst->size; + args->instanceMem.addressSpace = 2; + args->instanceMem.cacheAttrib = 1; + + args->userdMem.base = userd_addr; + args->userdMem.size = fifo->func->chan.func->userd->size; + args->userdMem.addressSpace = 2; + args->userdMem.cacheAttrib = 1; + + args->ramfcMem.base = inst_addr; + 
args->ramfcMem.size = 0x200; + args->ramfcMem.addressSpace = 2; + args->ramfcMem.cacheAttrib = 1; + + args->mthdbufMem.base = mthdbuf_addr; + args->mthdbufMem.size = fifo->rm.mthdbuf_size; + args->mthdbufMem.addressSpace = 1; + args->mthdbufMem.cacheAttrib = 0; + + if (!priv) + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER); + else + args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE); + args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE); + + return nvkm_gsp_rm_alloc_wr(chan, args); +} + +static int +r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) +{ + rpc_rc_triggered_v17_02 *msg = repv; + struct nvkm_gsp *gsp = priv; + + if (WARN_ON(repc < sizeof(*msg))) + return -EINVAL; + + nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d " + "fault_addr:%08x%08x fault_type:%08x\n", + msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType, + msg->scope, msg->partitionAttributionId, + msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType); + + r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid); + return 0; +} + +static int +r570_fifo_ectx_size(struct nvkm_fifo *fifo) +{ + NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl; + struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp; + struct nvkm_runl *runl; + struct nvkm_engn *engn; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO, + sizeof(*ctrl)); + if (WARN_ON(IS_ERR(ctrl))) + return PTR_ERR(ctrl); + + for (int i = 0; i < ctrl->numConstructedFalcons; i++) { + nvkm_runl_foreach(runl, fifo) { + nvkm_runl_foreach_engn(engn, runl) { + if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) { + engn->rm.size = + ctrl->constructedFalconsTable[i].ctxBufferSize; + break; + } + } + } + } + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int +r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080) +{ +#define RM_ENGINE_TYPE(RM,NVKM,INST) \ + RM_ENGINE_TYPE_##RM: \ + *ptype = NVKM_ENGINE_##NVKM; \ + *p2080 = NV2080_ENGINE_TYPE_##RM; \ + return INST + + switch (rm) { + case RM_ENGINE_TYPE( GR0, GR, 0); + case RM_ENGINE_TYPE( COPY0, CE, 0); + case RM_ENGINE_TYPE( COPY1, CE, 1); + case RM_ENGINE_TYPE( COPY2, CE, 2); + case RM_ENGINE_TYPE( COPY3, CE, 3); + case RM_ENGINE_TYPE( COPY4, CE, 4); + case RM_ENGINE_TYPE( COPY5, CE, 5); + case RM_ENGINE_TYPE( COPY6, CE, 6); + case RM_ENGINE_TYPE( COPY7, CE, 7); + case RM_ENGINE_TYPE( COPY8, CE, 8); + case RM_ENGINE_TYPE( COPY9, CE, 9); + case RM_ENGINE_TYPE( COPY10, CE, 10); + case RM_ENGINE_TYPE( COPY11, CE, 11); + case RM_ENGINE_TYPE( COPY12, CE, 12); + case RM_ENGINE_TYPE( COPY13, CE, 13); + case RM_ENGINE_TYPE( COPY14, CE, 14); + case RM_ENGINE_TYPE( COPY15, CE, 15); + case RM_ENGINE_TYPE( COPY16, CE, 16); + case RM_ENGINE_TYPE( COPY17, CE, 17); + case RM_ENGINE_TYPE( COPY18, CE, 18); + case RM_ENGINE_TYPE( COPY19, CE, 19); + case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0); + case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1); + case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2); + case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3); + case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4); + case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5); + case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6); + case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7); + case RM_ENGINE_TYPE( 
NVENC0, NVENC, 0); + case RM_ENGINE_TYPE( NVENC1, NVENC, 1); + case RM_ENGINE_TYPE( NVENC2, NVENC, 2); + case RM_ENGINE_TYPE( NVENC3, NVENC, 3); + case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0); + case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1); + case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2); + case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3); + case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4); + case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5); + case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6); + case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7); + case RM_ENGINE_TYPE( SW, SW, 0); + case RM_ENGINE_TYPE( SEC2, SEC2, 0); + case RM_ENGINE_TYPE( OFA0, OFA, 0); + case RM_ENGINE_TYPE( OFA1, OFA, 1); + default: + return -EINVAL; + } +#undef RM_ENGINE_TYPE +} + +const struct nvkm_rm_api_fifo +r570_fifo = { + .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type, + .ectx_size = r570_fifo_ectx_size, + .rsvd_chids = 1, + .rc_triggered = r570_fifo_rc_triggered, + .chan = { + .alloc = r570_chan_alloc, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c new file mode 100644 index 000000000000..c92ec231f09a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include + +#include +#include +#include +#include + +#include "nvrm/gr.h" +#include "nvrm/engine.h" + +int +r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask) +{ + NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl; + int ret; + + ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->gpcId = gpc; + + ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); + if (ret) + return ret; + + *pmask = ctrl->tpcMask; + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +int +r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask) +{ + NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + *pmask = ctrl->gpcMask; + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); + return 0; +} + +static int +r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown) +{ + NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->bTeardown = teardown; + + return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl); +} + +static void +r570_gr_scrubber_fini(struct r535_gr *gr) +{ + /* Teardown scrubber channel on RM. */ + if (gr->scrubber.enabled) { + WARN_ON(r570_gr_scrubber_ctrl(gr, true)); + gr->scrubber.enabled = false; + } + + /* Free scrubber channel. 
*/ + nvkm_gsp_rm_free(&gr->scrubber.threed); + nvkm_gsp_rm_free(&gr->scrubber.chan); + + for (int i = 0; i < gr->ctxbuf_nr; i++) { + nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]); + nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]); + } + + nvkm_vmm_unref(&gr->scrubber.vmm); + nvkm_memory_unref(&gr->scrubber.inst); +} + +static int +r570_gr_scrubber_init(struct r535_gr *gr) +{ + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_gsp *gsp = device->gsp; + struct nvkm_rm *rm = gsp->rm; + int ret; + + /* Scrubber channel only required on TU10x. */ + switch (device->chipset) { + case 0x162: + case 0x164: + case 0x166: + break; + default: + return 0; + } + + if (gr->scrubber.chid < 0) { + gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL); + if (gr->scrubber.chid < 0) + return gr->scrubber.chid; + } + + /* Allocate scrubber channel. */ + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, + 0x2000 + device->fifo->rm.mthdbuf_size, 0, true, + &gr->scrubber.inst); + if (ret) + goto done; + + ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm", + &gr->scrubber.vmm); + if (ret) + goto done; + + ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS); + if (ret) + goto done; + + ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL, + NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid, + nvkm_memory_addr(gr->scrubber.inst), + nvkm_memory_addr(gr->scrubber.inst) + 0x1000, + nvkm_memory_addr(gr->scrubber.inst) + 0x2000, + gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan); + if (ret) + goto done; + + ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem, + gr->scrubber.ctxbuf.vma, &gr->scrubber.chan); + if (ret) + goto done; + + ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ, + rm->gpu->gr.class.threed, 0, &gr->scrubber.threed); + if (ret) + goto done; + + /* Initialise scrubber channel on RM. */ + ret = r570_gr_scrubber_ctrl(gr, false); + if (ret) + goto done; + + gr->scrubber.enabled = true; + +done: + if (ret) + r570_gr_scrubber_fini(gr); + + return ret; +} + +static int +r570_gr_get_ctxbufs_info(struct r535_gr *gr) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info; + struct nvkm_subdev *subdev = &gr->base.engine.subdev; + struct nvkm_gsp *gsp = subdev->device->gsp; + + info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + sizeof(*info)); + if (WARN_ON(IS_ERR(info))) + return PTR_ERR(info); + + for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) + r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]); + + nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info); + return 0; +} + +const struct nvkm_rm_api_gr +r570_gr = { + .get_ctxbufs_info = r570_gr_get_ctxbufs_info, + .scrubber.init = r570_gr_scrubber_init, + .scrubber.fini = r570_gr_scrubber_fini, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c new file mode 100644 index 000000000000..55795c49371f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include +#include + +#include + +#include "nvrm/gsp.h" +#include "nvrm/rpcfn.h" +#include "nvrm/msgfn.h" + +#include + +static u32 +r570_gsp_sr_data_size(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta = gsp->wpr_meta.data; + + return (meta->frtsOffset + meta->frtsSize) - + (meta->nonWprHeapOffset + meta->nonWprHeapSize); +} + +static void +r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp) +{ + if (gsp->subdev.debug < NV_DBG_DEBUG) + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL); +} + +static bool +r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst) +{ + switch (mc_engine_idx) { + case MC_ENGINE_IDX_GSP: + *ptype = NVKM_SUBDEV_GSP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_DISP: + *ptype = NVKM_ENGINE_DISP; + *pinst = 0; + return true; + case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19: + *ptype = NVKM_ENGINE_CE; + *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0; + return true; + case MC_ENGINE_IDX_GR0: + *ptype = NVKM_ENGINE_GR; + *pinst = 0; + return true; + case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: + *ptype = NVKM_ENGINE_NVDEC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0; + return true; + case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3: + *ptype = NVKM_ENGINE_NVENC; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC; + return true; + case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: + *ptype = NVKM_ENGINE_NVJPG; + *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0; + return true; + case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1: + *ptype = NVKM_ENGINE_OFA; + *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0; + return true; + default: + return false; + } +} + +static int +r570_gsp_get_static_info(struct nvkm_gsp *gsp) +{ + GspStaticConfigInfo *rpc; + u32 gpc_mask; + u32 tpc_mask; + int ret; + + rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); + if (IS_ERR(rpc)) + return PTR_ERR(rpc); + + gsp->internal.client.object.client = &gsp->internal.client; + gsp->internal.client.object.parent = NULL; + gsp->internal.client.object.handle = rpc->hInternalClient; + gsp->internal.client.gsp = gsp; + INIT_LIST_HEAD(&gsp->internal.client.events); + + gsp->internal.device.object.client = &gsp->internal.client; + gsp->internal.device.object.parent = &gsp->internal.client.object; + gsp->internal.device.object.handle = rpc->hInternalDevice; + + gsp->internal.device.subdevice.client = &gsp->internal.client; + gsp->internal.device.subdevice.parent = &gsp->internal.device.object; + gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; + + gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; + gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; + + r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams); + + nvkm_gsp_rpc_done(gsp, rpc); + + ret = r570_gr_gpc_mask(gsp, &gpc_mask); + if (ret) + return ret; + + for (int gpc = 0; gpc < 32; gpc++) { + if (gpc_mask & BIT(gpc)) { + ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask); + if (ret) + return ret; + + gsp->gr.tpcs += hweight32(tpc_mask); + gsp->gr.gpcs++; + } + } + + return 0; +} + +static void +r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) +{ +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) + acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); + + if (!handle) + return; + + acpi->bValid = 1; + + r535_gsp_acpi_dod(handle, &acpi->dodMethodData); + r535_gsp_acpi_jt(handle, &acpi->jtMethodData); + r535_gsp_acpi_caps(handle, &acpi->capsMethodData); +#endif +} + +static int +r570_gsp_set_system_info(struct nvkm_gsp *gsp) +{ + struct 
nvkm_device *device = gsp->subdev.device; + struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev; + GspSystemInfo *info; + + if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) + return -ENOSYS; + + info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->gpuPhysAddr = device->func->resource_addr(device, 0); + info->gpuPhysFbAddr = device->func->resource_addr(device, 1); + info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->nvDomainBusDeviceFunc = pci_dev_id(pdev); + info->maxUserVa = TASK_SIZE; + info->pciConfigMirrorBase = 0x088000; + info->pciConfigMirrorSize = 0x001000; + info->PCIDeviceID = (pdev->device << 16) | pdev->vendor; + info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor; + info->PCIRevisionID = pdev->revision; + r570_gsp_acpi_info(gsp, &info->acpiMethodData); + info->bIsPrimary = video_is_primary_device(device->dev); + info->bPreserveVideoMemoryAllocations = false; + + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); +} + +static void +r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume) +{ + GSP_ARGUMENTS_CACHED *args; + + args = gsp->rmargs.data; + args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; + args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; + args->messageQueueInitArguments.cmdQueueOffset = + (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; + args->messageQueueInitArguments.statQueueOffset = + (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; + + if (!resume) { + args->srInitArguments.oldLevel = 0; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 0; + } else { + args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; + args->srInitArguments.flags = 0; + args->srInitArguments.bInPMTransition = 1; + } + + args->bDmemStack = 1; +} + +const struct nvkm_rm_api_gsp +r570_gsp = { + .set_rmargs = r570_gsp_set_rmargs, + .set_system_info = r570_gsp_set_system_info, + .get_static_info = r570_gsp_get_static_info, + .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx, + .drop_post_nocat_record = r570_gsp_drop_post_nocat_record, + .sr_data_size = r570_gsp_sr_data_size, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h new file mode 100644 index 000000000000..e8714e0abc37 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_CLIENT_H__ +#define __NVRM_CLIENT_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */ + +#define NV_PROC_NAME_MAX_LENGTH 100U + +typedef struct NV0000_ALLOC_PARAMETERS { + NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ + NvU32 processID; + char processName[NV_PROC_NAME_MAX_LENGTH]; + NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8); +} NV0000_ALLOC_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h new file mode 100644 index 000000000000..06e972835d77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_DISP_H__ +#define __NVRM_DISP_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS { + NvU32 feHwSysCap; + NvU32 windowPresentMask; + NvBool bFbRemapperEnabled; + NvU32 numHeads; + NvU32 i2cPort; + NvU32 internalDispActiveMask; + NvU32 embeddedDisplayPortMask; + NvBool bExternalMuxSupported; + NvBool bInternalMuxSupported; + NvU32 numDispChannels; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS; + +#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayMask; + NvU32 displayMaskDDC; +} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS; + +#define NV0073_CTRL_MAX_CONNECTORS 4U + +#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 DDCPartners; + NvU32 count; + struct { + NvU32 index; + NvU32 type; + NvU32 location; + } data[NV0073_CTRL_MAX_CONNECTORS]; + NvU32 platform; +} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS { + NvBool bDscSupported; + NvU32 encoderColorFormatMask; + NvU32 lineBufferSizeKB; + NvU32 rateBufferSizeKB; + NvU32 bitsPerPixelPrecision; + NvU32 maxNumHztSlices; + NvU32 lineBufferBitDepth; +} NV0073_CTRL_CMD_DSC_CAP_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 sorIndex; + NvU32 maxLinkRate; + NvU32 dpVersionsSupported; + NvU32 UHBRSupportedByGpu; + NvU32 minPClkForCompressed; + NvBool bIsMultistreamSupported; + NvBool bIsSCEnabled; + NvBool bHasIncreasedWatermarkLimits; + NvBool bIsPC2Disabled; + NvBool isSingleHeadMSTSupported; + NvBool bFECSupported; + 
NvBool bIsTrainPhyRepeater; + NvBool bOverrideLinkBw; + NvBool bUseRgFlushSequence; + NvBool bSupportDPDownSpread; + NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC; +} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ +#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2 +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1 +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2 +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U) + +#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 displayMask; + NvU32 retryTimeMs; +} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS; + +#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */ 
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 UHBRSupportedByDfp; +} NV0073_CTRL_DFP_GET_INFO_PARAMS; + +#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U) +#define NV0073_CTRL_DFP_FLAGS_LANE 5:3 +#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6 +#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7 +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8 +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9 +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10 +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13 +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14 +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15 +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LINK 21:20 +#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U) +#define 
NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22 +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23 +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25 +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0 +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1 +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2 +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U) + +#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 flags; + NvU32 displayId; +} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 brightness; + NvBool bUncalibrated; + NvU8 brightnessType; +} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS; + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { + // In + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + + // Out + NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + NvU8 linkBwCount; +} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; + +#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + NvU32 data; + NvU32 err; 
+ NvU32 retryTimeMs; + NvU32 eightLaneDpcdBaseAddr; +} NV0073_CTRL_DP_CTRL_PARAMS; + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 sorIndex; + NvU32 dpLink; + + NvBool bEnableOverride; + NvBool bMST; + NvU32 singleHeadMultistreamMode; + NvU32 hBlankSym; + NvU32 vBlankSym; + NvU32 colorFormat; + NvBool bEnableTwoHeadOneOr; + + struct { + NvU32 slotStart; + NvU32 slotEnd; + NvU32 PBN; + NvU32 Timeslice; + NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT + NvU32 singleHeadMSTPipeline; + NvBool bEnableAudioOverRightPanel; + } MST; + + struct { + NvBool bEnhancedFraming; + NvU32 tuSize; + NvU32 waterMark; + NvBool bEnableAudioOverRightPanel; + } SST; +} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ +typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mute; +} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS { + NvU32 addressSpace; + NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvU32 cacheSnoop; + NvU32 hclass; + NvU32 channelInstance; + NvBool valid; + NvU32 pbTargetAperture; + NvU32 channelPBSize; + NvU32 subDeviceId; +} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS; + +#define ADDR_SYSMEM (1) + +#define ADDR_FBMEM 2 // Frame buffer memory space + +typedef enum +{ + PB_SIZE_4KB = 0, + PB_SIZE_8KB, + PB_SIZE_16KB, + PB_SIZE_32KB, + PB_SIZE_64KB +} ChannelPBSize; + +typedef struct +{ + NvV32 channelInstance; // One of the n channel instances of a given channel type. + // Note that core channel has only one instance + // while all others have two (one per head). + NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer + NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications + NvU32 offset; // Initial offset for put/get, usually zero. + NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs + + NvU32 flags; + ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum) +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001 + + NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. 
SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method) +} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS; + +#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1 +#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2 +#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3 + +typedef enum +{ + IOVA, + PHYS_NVM, + PHYS_PCI, + PHYS_PCI_COHERENT +} PBTARGETAPERTURE; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h new file mode 100644 index 000000000000..7997050a4f29 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_ENGINE_H__ +#define __NVRM_ENGINE_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define MC_ENGINE_IDX_NULL 0 // This must be 0 +#define MC_ENGINE_IDX_TMR 1 +#define MC_ENGINE_IDX_DISP 2 +#define MC_ENGINE_IDX_FB 3 +#define MC_ENGINE_IDX_FIFO 4 +#define MC_ENGINE_IDX_VIDEO 5 +#define MC_ENGINE_IDX_MD 6 +#define MC_ENGINE_IDX_BUS 7 +#define MC_ENGINE_IDX_PMGR 8 +#define MC_ENGINE_IDX_VP2 9 +#define MC_ENGINE_IDX_CIPHER 10 +#define MC_ENGINE_IDX_BIF 11 +#define MC_ENGINE_IDX_PPP 12 +#define MC_ENGINE_IDX_PRIVRING 13 +#define MC_ENGINE_IDX_PMU 14 +#define MC_ENGINE_IDX_CE0 15 +#define MC_ENGINE_IDX_CE1 16 +#define MC_ENGINE_IDX_CE2 17 +#define MC_ENGINE_IDX_CE3 18 +#define MC_ENGINE_IDX_CE4 19 +#define MC_ENGINE_IDX_CE5 20 +#define MC_ENGINE_IDX_CE6 21 +#define MC_ENGINE_IDX_CE7 22 +#define MC_ENGINE_IDX_CE8 23 +#define MC_ENGINE_IDX_CE9 24 +#define MC_ENGINE_IDX_CE10 25 +#define MC_ENGINE_IDX_CE11 26 +#define MC_ENGINE_IDX_CE12 27 +#define MC_ENGINE_IDX_CE13 28 +#define MC_ENGINE_IDX_CE14 29 +#define MC_ENGINE_IDX_CE15 30 +#define MC_ENGINE_IDX_CE16 31 +#define MC_ENGINE_IDX_CE17 32 +#define MC_ENGINE_IDX_CE18 33 +#define MC_ENGINE_IDX_CE19 34 +#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19 +#define MC_ENGINE_IDX_VIC 35 +#define MC_ENGINE_IDX_ISOHUB 36 +#define MC_ENGINE_IDX_VGPU 37 +#define MC_ENGINE_IDX_NVENC 38 +#define MC_ENGINE_IDX_NVENC1 39 +#define MC_ENGINE_IDX_NVENC2 40 +#define MC_ENGINE_IDX_NVENC3 41 +#define MC_ENGINE_IDX_C2C 42 +#define MC_ENGINE_IDX_LTC 43 +#define MC_ENGINE_IDX_FBHUB 44 +#define MC_ENGINE_IDX_HDACODEC 45 +#define MC_ENGINE_IDX_GMMU 46 +#define MC_ENGINE_IDX_SEC2 47 +#define MC_ENGINE_IDX_FSP 48 +#define MC_ENGINE_IDX_NVLINK 49 +#define MC_ENGINE_IDX_GSP 50 +#define MC_ENGINE_IDX_NVJPG 51 +#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG +#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG +#define MC_ENGINE_IDX_NVJPEG1 52 +#define MC_ENGINE_IDX_NVJPEG2 53 +#define MC_ENGINE_IDX_NVJPEG3 54 +#define MC_ENGINE_IDX_NVJPEG4 55 +#define MC_ENGINE_IDX_NVJPEG5 56 +#define MC_ENGINE_IDX_NVJPEG6 57 +#define MC_ENGINE_IDX_NVJPEG7 58 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59 +#define MC_ENGINE_IDX_ACCESS_CNTR 60 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63 +#define MC_ENGINE_IDX_INFO_FAULT 64 +#define MC_ENGINE_IDX_BSP 65 +#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP +#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC +#define MC_ENGINE_IDX_NVDEC1 66 +#define MC_ENGINE_IDX_NVDEC2 67 +#define MC_ENGINE_IDX_NVDEC3 68 +#define MC_ENGINE_IDX_NVDEC4 69 +#define 
MC_ENGINE_IDX_NVDEC5 70 +#define MC_ENGINE_IDX_NVDEC6 71 +#define MC_ENGINE_IDX_NVDEC7 72 +#define MC_ENGINE_IDX_CPU_DOORBELL 73 +#define MC_ENGINE_IDX_PRIV_DOORBELL 74 +#define MC_ENGINE_IDX_MMU_ECC_ERROR 75 +#define MC_ENGINE_IDX_BLG 76 +#define MC_ENGINE_IDX_PERFMON 77 +#define MC_ENGINE_IDX_BUF_RESET 78 +#define MC_ENGINE_IDX_XBAR 79 +#define MC_ENGINE_IDX_ZPW 80 +#define MC_ENGINE_IDX_OFA0 81 +#define MC_ENGINE_IDX_OFA1 82 +#define MC_ENGINE_IDX_TEGRA 83 +#define MC_ENGINE_IDX_GR 84 +#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR +#define MC_ENGINE_IDX_GR1 85 +#define MC_ENGINE_IDX_GR2 86 +#define MC_ENGINE_IDX_GR3 87 +#define MC_ENGINE_IDX_GR4 88 +#define MC_ENGINE_IDX_GR5 89 +#define MC_ENGINE_IDX_GR6 90 +#define MC_ENGINE_IDX_GR7 91 +#define MC_ENGINE_IDX_ESCHED 92 +#define MC_ENGINE_IDX_ESCHED__SIZE 64 +#define MC_ENGINE_IDX_GR_FECS_LOG 156 +#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG +#define MC_ENGINE_IDX_GR1_FECS_LOG 157 +#define MC_ENGINE_IDX_GR2_FECS_LOG 158 +#define MC_ENGINE_IDX_GR3_FECS_LOG 159 +#define MC_ENGINE_IDX_GR4_FECS_LOG 160 +#define MC_ENGINE_IDX_GR5_FECS_LOG 161 +#define MC_ENGINE_IDX_GR6_FECS_LOG 162 +#define MC_ENGINE_IDX_GR7_FECS_LOG 163 +#define MC_ENGINE_IDX_TMR_SWRL 164 +#define MC_ENGINE_IDX_DISP_GSP 165 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167 +#define MC_ENGINE_IDX_PXUC 168 +#define MC_ENGINE_IDX_SYSLTC 169 +#define MC_ENGINE_IDX_LRCC 170 +#define MC_ENGINE_IDX_GSPLITE 171 +#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE +#define MC_ENGINE_IDX_GSPLITE1 172 +#define MC_ENGINE_IDX_GSPLITE2 173 +#define MC_ENGINE_IDX_GSPLITE3 174 +#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3 +#define MC_ENGINE_IDX_DPAUX 175 +#define MC_ENGINE_IDX_DISP_LOW 176 +#define MC_ENGINE_IDX_MAX 177 + +typedef enum +{ + RM_ENGINE_TYPE_NULL = (0x00000000), + RM_ENGINE_TYPE_GR0 = (0x00000001), + RM_ENGINE_TYPE_GR1 = (0x00000002), + RM_ENGINE_TYPE_GR2 = (0x00000003), + RM_ENGINE_TYPE_GR3 = (0x00000004), + RM_ENGINE_TYPE_GR4 = (0x00000005), + RM_ENGINE_TYPE_GR5 = (0x00000006), + RM_ENGINE_TYPE_GR6 = (0x00000007), + RM_ENGINE_TYPE_GR7 = (0x00000008), + RM_ENGINE_TYPE_COPY0 = (0x00000009), + RM_ENGINE_TYPE_COPY1 = (0x0000000a), + RM_ENGINE_TYPE_COPY2 = (0x0000000b), + RM_ENGINE_TYPE_COPY3 = (0x0000000c), + RM_ENGINE_TYPE_COPY4 = (0x0000000d), + RM_ENGINE_TYPE_COPY5 = (0x0000000e), + RM_ENGINE_TYPE_COPY6 = (0x0000000f), + RM_ENGINE_TYPE_COPY7 = (0x00000010), + RM_ENGINE_TYPE_COPY8 = (0x00000011), + RM_ENGINE_TYPE_COPY9 = (0x00000012), + RM_ENGINE_TYPE_COPY10 = (0x00000013), + RM_ENGINE_TYPE_COPY11 = (0x00000014), + RM_ENGINE_TYPE_COPY12 = (0x00000015), + RM_ENGINE_TYPE_COPY13 = (0x00000016), + RM_ENGINE_TYPE_COPY14 = (0x00000017), + RM_ENGINE_TYPE_COPY15 = (0x00000018), + RM_ENGINE_TYPE_COPY16 = (0x00000019), + RM_ENGINE_TYPE_COPY17 = (0x0000001a), + RM_ENGINE_TYPE_COPY18 = (0x0000001b), + RM_ENGINE_TYPE_COPY19 = (0x0000001c), + RM_ENGINE_TYPE_NVDEC0 = (0x0000001d), + RM_ENGINE_TYPE_NVDEC1 = (0x0000001e), + RM_ENGINE_TYPE_NVDEC2 = (0x0000001f), + RM_ENGINE_TYPE_NVDEC3 = (0x00000020), + RM_ENGINE_TYPE_NVDEC4 = (0x00000021), + RM_ENGINE_TYPE_NVDEC5 = (0x00000022), + RM_ENGINE_TYPE_NVDEC6 = (0x00000023), + RM_ENGINE_TYPE_NVDEC7 = (0x00000024), + RM_ENGINE_TYPE_NVENC0 = (0x00000025), + RM_ENGINE_TYPE_NVENC1 = (0x00000026), + RM_ENGINE_TYPE_NVENC2 = (0x00000027), + // Bug 4175886 - Use this new value for all chips once GB20X is released + RM_ENGINE_TYPE_NVENC3 = (0x00000028), + RM_ENGINE_TYPE_VP = 
(0x00000029), + RM_ENGINE_TYPE_ME = (0x0000002a), + RM_ENGINE_TYPE_PPP = (0x0000002b), + RM_ENGINE_TYPE_MPEG = (0x0000002c), + RM_ENGINE_TYPE_SW = (0x0000002d), + RM_ENGINE_TYPE_TSEC = (0x0000002e), + RM_ENGINE_TYPE_VIC = (0x0000002f), + RM_ENGINE_TYPE_MP = (0x00000030), + RM_ENGINE_TYPE_SEC2 = (0x00000031), + RM_ENGINE_TYPE_HOST = (0x00000032), + RM_ENGINE_TYPE_DPU = (0x00000033), + RM_ENGINE_TYPE_PMU = (0x00000034), + RM_ENGINE_TYPE_FBFLCN = (0x00000035), + RM_ENGINE_TYPE_NVJPEG0 = (0x00000036), + RM_ENGINE_TYPE_NVJPEG1 = (0x00000037), + RM_ENGINE_TYPE_NVJPEG2 = (0x00000038), + RM_ENGINE_TYPE_NVJPEG3 = (0x00000039), + RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a), + RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b), + RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c), + RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d), + RM_ENGINE_TYPE_OFA0 = (0x0000003e), + RM_ENGINE_TYPE_OFA1 = (0x0000003f), + RM_ENGINE_TYPE_RESERVED40 = (0x00000040), + RM_ENGINE_TYPE_RESERVED41 = (0x00000041), + RM_ENGINE_TYPE_RESERVED42 = (0x00000042), + RM_ENGINE_TYPE_RESERVED43 = (0x00000043), + RM_ENGINE_TYPE_RESERVED44 = (0x00000044), + RM_ENGINE_TYPE_RESERVED45 = (0x00000045), + RM_ENGINE_TYPE_RESERVED46 = (0x00000046), + RM_ENGINE_TYPE_RESERVED47 = (0x00000047), + RM_ENGINE_TYPE_RESERVED48 = (0x00000048), + RM_ENGINE_TYPE_RESERVED49 = (0x00000049), + RM_ENGINE_TYPE_RESERVED4a = (0x0000004a), + RM_ENGINE_TYPE_RESERVED4b = (0x0000004b), + RM_ENGINE_TYPE_RESERVED4c = (0x0000004c), + RM_ENGINE_TYPE_RESERVED4d = (0x0000004d), + RM_ENGINE_TYPE_RESERVED4e = (0x0000004e), + RM_ENGINE_TYPE_RESERVED4f = (0x0000004f), + RM_ENGINE_TYPE_RESERVED50 = (0x00000050), + RM_ENGINE_TYPE_RESERVED51 = (0x00000051), + RM_ENGINE_TYPE_RESERVED52 = (0x00000052), + RM_ENGINE_TYPE_RESERVED53 = (0x00000053), + RM_ENGINE_TYPE_LAST = (0x00000054), +} RM_ENGINE_TYPE; + +#define NV2080_ENGINE_TYPE_NULL (0x00000000) +#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) +#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS +#define NV2080_ENGINE_TYPE_GR1 (0x00000002) +#define NV2080_ENGINE_TYPE_GR2 (0x00000003) +#define NV2080_ENGINE_TYPE_GR3 (0x00000004) +#define NV2080_ENGINE_TYPE_GR4 (0x00000005) +#define NV2080_ENGINE_TYPE_GR5 (0x00000006) +#define NV2080_ENGINE_TYPE_GR6 (0x00000007) +#define NV2080_ENGINE_TYPE_GR7 (0x00000008) +#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) +#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a) +#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b) +#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c) +#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d) +#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e) +#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f) +#define NV2080_ENGINE_TYPE_COPY7 (0x00000010) +#define NV2080_ENGINE_TYPE_COPY8 (0x00000011) +#define NV2080_ENGINE_TYPE_COPY9 (0x00000012) +#define NV2080_ENGINE_TYPE_BSP (0x00000013) +#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP +#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014) +#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015) +#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016) +#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017) +#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018) +#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019) +#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a) +#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) +#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ +#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c) +#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d) +#define NV2080_ENGINE_TYPE_VP (0x0000001e) +#define NV2080_ENGINE_TYPE_ME (0x0000001f) +#define NV2080_ENGINE_TYPE_PPP (0x00000020) 
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021) +#define NV2080_ENGINE_TYPE_SW (0x00000022) +#define NV2080_ENGINE_TYPE_CIPHER (0x00000023) +#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER +#define NV2080_ENGINE_TYPE_VIC (0x00000024) +#define NV2080_ENGINE_TYPE_MP (0x00000025) +#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) +#define NV2080_ENGINE_TYPE_HOST (0x00000027) +#define NV2080_ENGINE_TYPE_DPU (0x00000028) +#define NV2080_ENGINE_TYPE_PMU (0x00000029) +#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a) +#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) +#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG +#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c) +#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d) +#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e) +#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f) +#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030) +#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031) +#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032) +#define NV2080_ENGINE_TYPE_OFA (0x00000033) +#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA +#define NV2080_ENGINE_TYPE_COPY10 (0x00000034) +#define NV2080_ENGINE_TYPE_COPY11 (0x00000035) +#define NV2080_ENGINE_TYPE_COPY12 (0x00000036) +#define NV2080_ENGINE_TYPE_COPY13 (0x00000037) +#define NV2080_ENGINE_TYPE_COPY14 (0x00000038) +#define NV2080_ENGINE_TYPE_COPY15 (0x00000039) +#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a) +#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b) +#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c) +#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d) +#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e) +#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052) +#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053) +#define NV2080_ENGINE_TYPE_LAST (0x00000054) +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h new file mode 100644 index 000000000000..8af432375f7a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_FBSR_H__ +#define __NVRM_FBSR_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS { + NvHandle hClient; + NvHandle hSysMem; + NvBool bEnteringGcoffState; + NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8); +} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS; + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h new file mode 100644 index 000000000000..2b002ca64e0f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_FIFO_H__ +#define __NVRM_FIFO_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV_MAX_SUBDEVICES 8 + +typedef struct NV_MEMORY_DESC_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV_MEMORY_DESC_PARAMS; + +#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U + +#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U + +typedef struct NV_CHANNEL_ALLOC_PARAMS { + + NvHandle hObjectError; // error context DMA + NvHandle hObjectBuffer; // no longer used + NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO + NvU32 gpFifoEntries; // number of GP FIFO entries + + NvU32 flags; + + + NvHandle hContextShare; // context share handle + NvHandle hVASpace; // VASpace for the channel + + // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 + NvHandle hUserdMemory[NV_MAX_SUBDEVICES]; + + // offset to beginning of UserD within hUserdMemory[x] + NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8); + + // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated + NvU32 engineType; + // Channel identifier that is unique for the duration of a RM session + NvU32 cid; + // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods + NvU32 subDeviceId; + NvHandle hObjectEccError; // ECC error context DMA + + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8); + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8); + + NvHandle hPhysChannelGroup; // reserved + NvU32 internalFlags; // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved + NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved + NvU32 ProcessID; // reserved + NvU32 SubProcessID; // reserved + + // IV used for CPU-side encryption / GPU-side decryption. + NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // IV used for CPU-side decryption / GPU-side encryption. + NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved + // Nonce used CPU-side signing / GPU-side signature verification. 
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved + NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature +} NV_CHANNEL_ALLOC_PARAMS; + +typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; + +#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 +#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE +#define NVOS04_FLAGS_VPR 2:2 +#define NVOS04_FLAGS_VPR_FALSE 0x00000000 +#define NVOS04_FLAGS_VPR_TRUE 0x00000001 +#define NVOS04_FLAGS_CC_SECURE 2:2 +#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000 +#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 +#define NVOS04_FLAGS_MAP_CHANNEL 30:30 +#define 
NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001 + +typedef enum { + /*! + * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by + * kernel CPU-RM clients. + */ + ERROR_NOTIFIER_TYPE_UNKNOWN = 0, + /*! @brief Error notifier is explicitly not set. + * + * The corresponding hErrorContext or hEccErrorContext must be + * NV01_NULL_OBJECT. + */ + ERROR_NOTIFIER_TYPE_NONE, + /*! @brief Error notifier is a ContextDma */ + ERROR_NOTIFIER_TYPE_CTXDMA, + /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */ + ERROR_NOTIFIER_TYPE_MEMORY +} ErrorNotifierType; + +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1 + +typedef struct rpc_rc_triggered_v17_02 +{ + NvU32 nv2080EngineType; + NvU32 chid; + NvU32 gfid; + NvU32 exceptLevel; + NvU32 exceptType; + NvU32 scope; + NvU16 partitionAttributionId; + NvU32 mmuFaultAddrLo; + NvU32 mmuFaultAddrHi; + NvU32 mmuFaultType; + NvBool bCallbackNeeded; + NvU32 rcJournalBufferSize; + NvU8 rcJournalBuffer[]; +} rpc_rc_triggered_v17_02; + +#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40 + +typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO; + +#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +typedef struct 
NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS { + NvBool bDisableActiveChannels; +} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */ +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h new file mode 100644 index 000000000000..feed1dabd9d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_GR_H__ +#define __NVRM_GR_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f) +#define 
NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a) + +#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS { + NvU32 gpcMask; +} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */ +typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS { + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS; + +#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042 +#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3) +#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4) + +typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS { + NvBool bTeardown; +} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */ +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h new file mode 100644 index 000000000000..4685a898fac6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h @@ -0,0 +1,576 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_GSP_H__ +#define __NVRM_GSP_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NV_DECLARE_ALIGNED(NvU64 reserved, 8); + NvU32 performance; + NvBool supportCompressed; + NvBool supportISO; + NvBool bProtected; + NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; +} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + NvU32 numFBRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; + +#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23 + +#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL) + +typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + NvU32 index; + NvU32 flags; + NvU32 length; + NvU8 data[NV2080_GPU_MAX_GID_LENGTH]; +} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS; + +typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { + NvU32 BoardID; + char chipSKU[9]; + char chipSKUMod[5]; + NvU32 skuConfigVersion; + char project[5]; + char projectSKU[5]; + char CDP[6]; + char projectSKUMod[2]; + NvU32 businessCycle; +} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; + +#define MAX_GPC_COUNT 32 + +typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { + NvU32 totalVFs; + NvU32 firstVfOffset; + NvU32 vfFeatureMask; + NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8); + NV_DECLARE_ALIGNED(NvU64 bar0Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar1Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar2Size, 8); + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; + NvBool bSriovEnabled; + NvBool bSriovHeavyEnabled; + NvBool bEmulateVFBar0TlbInvalidationRegister; + NvBool bClientRmAllocatedCtxBuffer; + NvBool bNonPowerOf2ChannelCountSupported; + NvBool bVfResizableBAR1Supported; +} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; + +#include "engine.h" + +#define NVGPU_ENGINE_CAPS_MASK_BITS 32 + +#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1) + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U) + +typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS +{ + NvU32 numHeads; + NvU32 maxNumHeads; +} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; + +typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS +{ + NvU32 headIndex; + NvU32 maxHResolution; + NvU32 maxVResolution; +} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; + +#define MAX_GROUP_COUNT 2 + +typedef struct +{ + NvU32 ecidLow; + NvU32 ecidHigh; + NvU32 ecidExtended; +} EcidManufacturingInfo; + +typedef struct +{ + NvU64 nonWprHeapOffset; + NvU64 frtsOffset; +} FW_WPR_LAYOUT_OFFSET; + +typedef struct GspStaticConfigInfo_t +{ + NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo; + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo; + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams; + + NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps; + NvU32 sriovMaxGfid; + + NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX]; + + NvBool 
poisonFuseEnabled; + + NvU64 fb_length; + NvU64 fbio_mask; + NvU32 fb_bus_width; + NvU32 fb_ram_type; + NvU64 fbp_mask; + NvU32 l2_cache_size; + + NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvBool bGpuInternalSku; + NvBool bIsQuadroGeneric; + NvBool bIsQuadroAd; + NvBool bIsNvidiaNvs; + NvBool bIsVgx; + NvBool bGeforceSmb; + NvBool bIsTitan; + NvBool bIsTesla; + NvBool bIsMobile; + NvBool bIsGc6Rtd3Allowed; + NvBool bIsGc8Rtd3Allowed; + NvBool bIsGcOffRtd3Allowed; + NvBool bIsGcoffLegacyAllowed; + NvBool bIsMigSupported; + + /* "Total Board Power" refers to power requirement of GPU, + * while in GC6 state. Majority of this power will be used + * to keep V-RAM active to preserve its content. + * Some energy maybe consumed by Always-on components on GPU chip. + * This power will be provided by 3.3v voltage rail. + */ + NvU16 RTD3GC6TotalBoardPower; + + /* PERST# (i.e. PCI Express Reset) is a sideband signal + * generated by the PCIe Host to indicate the PCIe devices, + * that the power-rails and the reference-clock are stable. + * The endpoint device typically uses this signal as a global reset. + */ + NvU16 RTD3GC6PerstDelay; + + NvU64 bar1PdeBase; + NvU64 bar2PdeBase; + + NvBool bVbiosValid; + NvU32 vbiosSubVendor; + NvU32 vbiosSubDevice; + + NvBool bPageRetirementSupported; + + NvBool bSplitVasBetweenServerClientRm; + + NvBool bClRootportNeedsNosnoopWAR; + + VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads; + VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution; + NvU64 displaylessMaxPixels; + + // Client handle for internal RMAPI control. + NvHandle hInternalClient; + + // Device handle for internal RMAPI control. + NvHandle hInternalDevice; + + // Subdevice handle for internal RMAPI control. 
+ NvHandle hInternalSubdevice; + + NvBool bSelfHostedMode; + NvBool bAtsSupported; + + NvBool bIsGpuUefi; + NvBool bIsEfiInit; + + EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT]; + + FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset; +} GspStaticConfigInfo; + +typedef struct +{ + NvU16 deviceID; // deviceID + NvU16 vendorID; // vendorID + NvU16 subdeviceID; // subsystem deviceID + NvU16 subvendorID; // subsystem vendorID + NvU8 revisionID; // revision ID +} BUSINFO; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct DOD_METHOD_DATA +{ + NV_STATUS status; + NvU32 acpiIdListLen; + NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} DOD_METHOD_DATA; + +typedef struct JT_METHOD_DATA +{ + NV_STATUS status; + NvU32 jtCaps; + NvU16 jtRevId; + NvBool bSBIOSCaps; +} JT_METHOD_DATA; + +typedef struct MUX_METHOD_DATA_ELEMENT +{ + NvU32 acpiId; + NvU32 mode; + NV_STATUS status; +} MUX_METHOD_DATA_ELEMENT; + +#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U) + +typedef struct MUX_METHOD_DATA +{ + NvU32 tableLen; + MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; + MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS]; +} MUX_METHOD_DATA; + +typedef struct CAPS_METHOD_DATA +{ + NV_STATUS status; + NvU32 optimusCaps; +} CAPS_METHOD_DATA; + +typedef struct ACPI_METHOD_DATA +{ + NvBool bValid; + DOD_METHOD_DATA dodMethodData; + JT_METHOD_DATA jtMethodData; + MUX_METHOD_DATA muxMethodData; + CAPS_METHOD_DATA capsMethodData; +} ACPI_METHOD_DATA; + +typedef struct GSP_VF_INFO +{ + NvU32 totalVFs; + NvU32 firstVFOffset; + NvU64 FirstVFBar0Address; + NvU64 FirstVFBar1Address; + NvU64 FirstVFBar2Address; + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; +} GSP_VF_INFO; + +typedef struct +{ + // Link capabilities + NvU32 linkCap; +} GSP_PCIE_CONFIG_REG; + +typedef struct GspSystemInfo +{ + NvU64 gpuPhysAddr; + NvU64 gpuPhysFbAddr; + NvU64 gpuPhysInstAddr; + NvU64 gpuPhysIoAddr; + NvU64 nvDomainBusDeviceFunc; + NvU64 simAccessBufPhysAddr; + NvU64 notifyOpSharedSurfacePhysAddr; + NvU64 pcieAtomicsOpMask; + NvU64 consoleMemSize; + NvU64 maxUserVa; + NvU32 pciConfigMirrorBase; + NvU32 pciConfigMirrorSize; + NvU32 PCIDeviceID; + NvU32 PCISubDeviceID; + NvU32 PCIRevisionID; + NvU32 pcieAtomicsCplDeviceCapMask; + NvU8 oorArch; + NvU64 clPdbProperties; + NvU32 Chipset; + NvBool bGpuBehindBridge; + NvBool bFlrSupported; + NvBool b64bBar0Supported; + NvBool bMnocAvailable; + NvU32 chipsetL1ssEnable; + NvBool bUpstreamL0sUnsupported; + NvBool bUpstreamL1Unsupported; + NvBool bUpstreamL1PorSupported; + NvBool bUpstreamL1PorMobileOnly; + NvBool bSystemHasMux; + NvU8 upstreamAddressValid; + BUSINFO FHBBusInfo; + BUSINFO chipsetIDInfo; + ACPI_METHOD_DATA acpiMethodData; + NvU32 hypervisorType; + NvBool bIsPassthru; + NvU64 sysTimerOffsetNs; + GSP_VF_INFO gspVFInfo; + NvBool bIsPrimary; + NvBool isGridBuild; + GSP_PCIE_CONFIG_REG pcieConfigReg; + NvU32 gridBuildCsp; + NvBool bPreserveVideoMemoryAllocations; + NvBool bTdrEventSupported; + NvBool bFeatureStretchVblankCapable; + NvBool bEnableDynamicGranularityPageArrays; + NvBool bClockBoostSupported; + NvBool bRouteDispIntrsToCPU; + NvU64 hostPageSize; +} GspSystemInfo; + +typedef struct rpc_os_error_log_v17_00 +{ + NvU32 exceptType; + NvU32 runlistId; + NvU32 chid; + char errString[0x100]; + NvU32 preemptiveRemovalPreviousXid; +} rpc_os_error_log_v17_00; + 
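A note on notation: the hi:lo pairs used throughout these headers (for example NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 and NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 in fifo.h above) name inclusive bit ranges within a 32-bit word. A minimal standalone sketch of extracting and inserting such fields, using hypothetical FIELD_* helpers rather than any in-tree macro:

#include <stdint.h>
#include <stdio.h>

/* Mask covering bits hi..lo inclusive, plus extract/insert helpers. */
#define FIELD_MASK(hi, lo)        ((((uint32_t)1 << ((hi) - (lo) + 1)) - 1) << (lo))
#define FIELD_GET(val, hi, lo)    (((val) & FIELD_MASK(hi, lo)) >> (lo))
#define FIELD_SET(val, hi, lo, x) (((val) & ~FIELD_MASK(hi, lo)) | \
				   (((uint32_t)(x) << (lo)) & FIELD_MASK(hi, lo)))

int main(void)
{
	uint32_t flags = 0;

	/* NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE occupies bits 20:12. */
	flags = FIELD_SET(flags, 20, 12, 0x2a);
	/* NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE occupies bit 4:4. */
	flags = FIELD_SET(flags, 4, 4, 1);

	printf("flags=0x%08x page=%u runqueue=%u\n", flags,
	       FIELD_GET(flags, 20, 12), FIELD_GET(flags, 4, 4));
	return 0;
}

This prints flags=0x0002a010 page=42 runqueue=1.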
+typedef struct +{ + // Magic + // BL to use for verification (i.e. Booter locked it in WPR2) + NvU64 magic; // = 0xdc3aae21371a60b3; + + // Revision number of Booter-BL-Sequencer handoff interface + // Bumped up when we change this interface so it is not backward compatible. + // Bumped up when we revoke GSP-RM ucode + NvU64 revision; // = 1; + + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + + NvU64 sysmemAddrOfRadix3Elf; + NvU64 sizeOfRadix3Elf; + + NvU64 sysmemAddrOfBootloader; + NvU64 sizeOfBootloader; + + // Offsets inside bootloader image needed by Booter + NvU64 bootloaderCodeOffset; + NvU64 bootloaderDataOffset; + NvU64 bootloaderManifestOffset; + + union + { + // Used only at initial boot + struct + { + NvU64 sysmemAddrOfSignature; + NvU64 sizeOfSignature; + }; + + // + // Used at suspend/resume to read GspFwHeapFreeList + // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart) + // + struct + { + NvU32 gspFwHeapFreeListWprOffset; + NvU32 unused0; + NvU64 unused1; + }; + }; + + // ---- Members describing FB layout -------------------------------- + NvU64 gspFwRsvdStart; + + NvU64 nonWprHeapOffset; + NvU64 nonWprHeapSize; + + NvU64 gspFwWprStart; + + // GSP-RM to use to setup heap. + NvU64 gspFwHeapOffset; + NvU64 gspFwHeapSize; + + // BL to use to find ELF for jump + NvU64 gspFwOffset; + // Size is sizeOfRadix3Elf above. + + NvU64 bootBinOffset; + // Size is sizeOfBootloader above. + + NvU64 frtsOffset; + NvU64 frtsSize; + + NvU64 gspFwWprEnd; + + // GSP-RM to use for fbRegionInfo? + NvU64 fbSize; + + // ---- Other members ----------------------------------------------- + + // GSP-RM to use for fbRegionInfo? + NvU64 vgaWorkspaceOffset; + NvU64 vgaWorkspaceSize; + + // Boot count. Used to determine whether to load the firmware image. + NvU64 bootCount; + + // This union is organized the way it is to start at an 8-byte boundary and achieve natural + // packing of the internal struct fields. + union + { + struct + { + // TODO: the partitionRpc* fields below do not really belong in this + // structure. The values are patched in by the partition bootstrapper + // when GSP-RM is booted in a partition, and this structure was a + // convenient place for the bootstrapper to access them. These should + // be moved to a different comm. mechanism between the bootstrapper + // and the GSP-RM tasks. + + // Shared partition RPC memory (physical address) + NvU64 partitionRpcAddr; + + // Offsets relative to partitionRpcAddr + NvU16 partitionRpcRequestOffset; + NvU16 partitionRpcReplyOffset; + + // Code section and dataSection offset and size. + NvU32 elfCodeOffset; + NvU32 elfDataOffset; + NvU32 elfCodeSize; + NvU32 elfDataSize; + + // Used during GSP-RM resume to check for revocation + NvU32 lsUcodeVersion; + }; + + struct + { + // Pad for the partitionRpc* fields, plus 4 bytes + NvU32 partitionRpcPadding[4]; + + // CrashCat (contiguous) buffer size/location - occupies same bytes as the + // elf(Code|Data)(Offset|Size) fields above. + // TODO: move to GSP_FMC_INIT_PARAMS + NvU64 sysmemAddrOfCrashReportQueue; + NvU32 sizeOfCrashReportQueue; + + // Pad for the lsUcodeVersion field + NvU32 lsUcodeVersionPadding[1]; + }; + }; + + // Number of VF partitions allocating sub-heaps from the WPR heap + // Used during boot to ensure the heap is adequately sized + NvU8 gspFwHeapVfPartitionCount; + + // Flags to help decide GSP-FW flow. + NvU8 flags; + + // Pad structure to exactly 256 bytes. 
Can replace padding with additional + // fields without incrementing revision. Padding initialized to 0. + NvU8 padding[2]; + + // + // Starts at gspFwWprEnd+frtsSize b/c FRTS is positioned + // to end where this allocation starts (when RM requests FSP to create + // FRTS). + // + NvU32 pmuReservedSize; + + // BL to use for verification (i.e. Booter says OK to boot) + NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified +} GspFwWprMeta; + +#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL + +#define GSP_FW_WPR_META_REVISION 1 + +typedef struct { + NvU64 sharedMemPhysAddr; + NvU32 pageTableEntryCount; + NvLength cmdQueueOffset; + NvLength statQueueOffset; +} MESSAGE_QUEUE_INIT_ARGUMENTS; + +typedef struct { + NvU32 oldLevel; + NvU32 flags; + NvBool bInPMTransition; +} GSP_SR_INIT_ARGUMENTS; + +typedef struct +{ + MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments; + GSP_SR_INIT_ARGUMENTS srInitArguments; + NvU32 gpuInstance; + NvBool bDmemStack; + + struct + { + NvU64 pa; + NvU64 size; + } profilerArgs; +} GSP_ARGUMENTS_CACHED; + +#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U) + +typedef struct +{ + // Magic for verification by secure ucode + NvU64 magic; // = GSP_FW_SR_META_MAGIC; + + // + // Revision number + // Bumped up when we change this interface so it is not backward compatible. + // + NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION; + + // Members regarding data in SYSMEM + NvU64 sysmemAddrOfSuspendResumeData; + NvU64 sizeOfSuspendResumeData; + + // + // Internal members for use by secure ucode + // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes. + // + NvU32 internal[32]; + + // Same as flags of GspFwWprMeta + NvU32 flags; + + // Subrevision number used by secure ucode + NvU32 subrevision; + + // + // Pad structure to exactly 256 bytes (1 DMA chunk). + // Padding initialized to zero. + // + NvU32 padding[22]; +} GspFwSRMeta; + +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage + +#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20) + +#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada + +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u) + +#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u) + +#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u) + +#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \ + (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \ + (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA)) + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h new file mode 100644 index 000000000000..e06643f57695 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_MSGFN_H__ +#define __NVRM_MSGFN_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#ifndef E +# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT, 0x1000) + E(GSP_INIT_DONE, 0x1001) + E(GSP_RUN_CPU_SEQUENCER, 0x1002) + E(POST_EVENT, 0x1003) + E(RC_TRIGGERED, 0x1004) + E(MMU_FAULT_QUEUED, 0x1005) + E(OS_ERROR_LOG, 0x1006) + E(RG_LINE_INTR, 0x1007) + E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008) + E(SIM_READ, 0x1009) + E(SIM_WRITE, 0x100a) + E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b) + E(UCODE_LIBOS_PRINT, 0x100c) + E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d) + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e) + E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f) + E(VGPU_CONFIG, 0x1010) + E(DISPLAY_MODESET, 0x1011) + E(EXTDEV_INTR_SERVICE, 0x1012) + E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013) + E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014) + E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015) + E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016) + E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017) + E(TIMED_SEMAPHORE_RELEASE, 0x1018) + E(NVLINK_IS_GPU_DEGRADED, 0x1019) + E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a) + E(NVLINK_FAULT_UP, 0x101b) + E(GSP_LOCKDOWN_NOTICE, 0x101c) + E(MIG_CI_CONFIG_UPDATE, 0x101d) + E(UPDATE_GSP_TRACE, 0x101e) + E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f) + E(GSP_POST_NOCAT_RECORD, 0x1020) + E(FECS_ERROR, 0x1021) + E(RECOVERY_ACTION, 0x1022) + E(NUM_EVENTS, 0x1023) +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h new file mode 100644 index 000000000000..fcaef7f553a6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __NVRM_OFA_H__ +#define __NVRM_OFA_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA? + NvU32 engineInstance; +} NV_OFA_ALLOCATION_PARAMETERS; +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h new file mode 100644 index 000000000000..2d67b598c58b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: MIT */ + +/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __NVRM_RPCFN_H__ +#define __NVRM_RPCFN_H__ +#include + +/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */ + +#ifndef X +# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP, 0) + X(RM, SET_GUEST_SYSTEM_INFO, 1) + X(RM, ALLOC_ROOT, 2) + X(RM, ALLOC_DEVICE, 3) // deprecated + X(RM, ALLOC_MEMORY, 4) + X(RM, ALLOC_CTX_DMA, 5) + X(RM, ALLOC_CHANNEL_DMA, 6) + X(RM, MAP_MEMORY, 7) + X(RM, BIND_CTX_DMA, 8) // deprecated + X(RM, ALLOC_OBJECT, 9) + X(RM, FREE, 10) + X(RM, LOG, 11) + X(RM, ALLOC_VIDMEM, 12) + X(RM, UNMAP_MEMORY, 13) + X(RM, MAP_MEMORY_DMA, 14) + X(RM, UNMAP_MEMORY_DMA, 15) + X(RM, GET_EDID, 16) // deprecated + X(RM, ALLOC_DISP_CHANNEL, 17) + X(RM, ALLOC_DISP_OBJECT, 18) + X(RM, ALLOC_SUBDEVICE, 19) + X(RM, ALLOC_DYNAMIC_MEMORY, 20) + X(RM, DUP_OBJECT, 21) + X(RM, IDLE_CHANNELS, 22) + X(RM, ALLOC_EVENT, 23) + X(RM, SEND_EVENT, 24) // deprecated + X(RM, REMAPPER_CONTROL, 25) // deprecated + X(RM, DMA_CONTROL, 26) // deprecated + X(RM, DMA_FILL_PTE_MEM, 27) + X(RM, MANAGE_HW_RESOURCE, 28) + X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated + X(RM, CREATE_FB_SEGMENT, 30) + X(RM, DESTROY_FB_SEGMENT, 31) + X(RM, ALLOC_SHARE_DEVICE, 32) + X(RM, DEFERRED_API_CONTROL, 33) + X(RM, REMOVE_DEFERRED_API, 34) + X(RM, SIM_ESCAPE_READ, 35) + X(RM, SIM_ESCAPE_WRITE, 36) + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37) + X(RM, FREE_VIDMEM_VIRT, 38) + X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated + X(RM, PERF_GET_PERFMON_SAMPLE, 40) + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated + X(RM, PERF_GET_LEVEL_INFO, 42) + X(RM, MAP_SEMA_MEMORY, 43) + X(RM, UNMAP_SEMA_MEMORY, 44) + X(RM, SET_SURFACE_PROPERTIES, 45) + X(RM, CLEANUP_SURFACE, 46) + X(RM, UNLOADING_GUEST_DRIVER, 47) + X(RM, TDR_SET_TIMEOUT_STATE, 48) + X(RM, SWITCH_TO_VGA, 49) + X(RM, GPU_EXEC_REG_OPS, 50) + X(RM, GET_STATIC_INFO, 51) + X(RM, ALLOC_VIRTMEM, 52) + X(RM, UPDATE_PDE_2, 53) + X(RM, SET_PAGE_DIRECTORY, 54) + X(RM, GET_STATIC_PSTATE_INFO, 55) + X(RM, TRANSLATE_GUEST_GPU_PTES, 56) + X(RM, RESERVED_57, 57) + X(RM, RESET_CURRENT_GR_CONTEXT, 58) + X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59) + X(RM, GET_ENGINE_UTILIZATION, 60) + X(RM, UPDATE_GPU_PDES, 61) + X(RM, GET_ENCODER_CAPACITY, 62) + X(RM, VGPU_PF_REG_READ32, 63) // deprecated + X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64) + X(GSP, GET_GSP_STATIC_INFO, 65) + X(RM, RMFS_INIT, 66) // deprecated + X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated + X(RM, RMFS_CLEANUP, 68) // deprecated + X(RM, RMFS_TEST, 69) // deprecated + X(RM, UPDATE_BAR_PDE, 70) + X(RM, CONTINUATION_RECORD, 71) + X(RM, GSP_SET_SYSTEM_INFO, 72) + X(RM, SET_REGISTRY, 73) + X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated + X(GSP, GSP_RM_CONTROL, 76) + X(RM, GET_STATIC_INFO2, 77) + X(RM, DUMP_PROTOBUF_COMPONENT, 78) + X(RM, UNSET_PAGE_DIRECTORY, 79) + X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated + X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE, 85) + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86) + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87) + X(RM, CTRL_RESET_CHANNEL, 88) + X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89) + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90) + X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91) + X(RM, 
CTRL_PERF_BOOST, 92) + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93) + X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94) + X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95) + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96) + X(RM, CTRL_GPFIFO_SCHEDULE, 97) + X(RM, CTRL_SET_TIMESLICE, 98) + X(RM, CTRL_PREEMPT, 99) + X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100) + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101) + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102) + X(GSP, GSP_RM_ALLOC, 103) + X(RM, CTRL_GET_P2P_CAPS_V2, 104) + X(RM, CTRL_CIPHER_AES_ENCRYPT, 105) + X(RM, CTRL_CIPHER_SESSION_KEY, 106) + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107) + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108) + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109) + X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110) + X(RM, CTRL_GPU_PROMOTE_CTX, 111) + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112) + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113) + X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114) + X(RM, CTRL_GPU_INITIALIZE_CTX, 115) + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116) + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117) + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118) + X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119) + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120) + X(RM, CTRL_GET_CE_PCE_MASK, 121) + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122) + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated + X(RM, CTRL_GET_NVLINK_STATUS, 124) + X(RM, CTRL_GET_P2P_CAPS, 125) + X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126) + X(RM, RESERVED_0, 127) + X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128) + X(RM, CTRL_RESERVE_HWPM_LEGACY, 129) + X(RM, CTRL_B0CC_EXEC_REG_OPS, 130) + X(RM, CTRL_BIND_PM_RESOURCES, 131) + X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132) + X(RM, CTRL_DBG_RESUME_CONTEXT, 133) + X(RM, CTRL_DBG_EXEC_REG_OPS, 134) + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135) + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136) + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137) + X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138) + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139) + X(RM, CTRL_ALLOC_PMA_STREAM, 140) + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141) + X(RM, CTRL_FB_GET_INFO_V2, 142) + X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143) + X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144) + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145) + X(RM, CTRL_GPU_EVICT_CTX, 146) + X(RM, CTRL_FB_GET_FS_INFO, 147) + X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148) + X(RM, CTRL_STOP_CHANNEL, 149) + X(RM, CTRL_GR_PC_SAMPLING_MODE, 150) + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151) + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152) + X(RM, CTRL_FREE_PMA_STREAM, 153) + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154) + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155) + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156) + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157) + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158) + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159) + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160) + X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161) + X(UVM, UVM_PAGING_CHANNEL_MAP, 162) + X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163) + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164) + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165) + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166) + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167) + X(RM, DCE_RM_INIT, 168) + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169) + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170) + X(RM, GET_PLCABLE_ADDRESS_KIND, 171) + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172) + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173) + X(RM, CTRL_GET_MMU_DEBUG_MODE, 174) + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175) + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176) + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177) + 
X(RM, DISABLE_CHANNELS, 178) + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179) + X(RM, CTRL_FABRIC_MEM_STATS, 180) + X(RM, SAVE_HIBERNATION_DATA, 181) + X(RM, RESTORE_HIBERNATION_DATA, 182) + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183) + X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184) + X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185) + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186) + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187) + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188) + X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189) + X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190) + X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191) + X(RM, CTRL_BUS_SET_P2P_MAPPING, 192) + X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193) + X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194) + X(RM, CTRL_GPU_MIGRATABLE_OPS, 195) + X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196) + X(RM, CTRL_GET_HS_CREDITS, 197) + X(RM, CTRL_SET_HS_CREDITS, 198) + X(RM, CTRL_PM_AREA_PC_SAMPLER, 199) + X(RM, INVALIDATE_TLB, 200) + X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated + X(RM, ECC_NOTIFIER_WRITE_ACK, 202) + X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203) + X(RM, RM_API_CONTROL, 204) + X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205) + X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206) + X(RM, GET_STATIC_DATA, 207) + X(RM, RESERVED_208, 208) + X(RM, CTRL_GPU_GET_INFO_V2, 209) + X(RM, GET_BRAND_CAPS, 210) + X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211) + X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212) + X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213) + X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214) + X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215) + X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216) + X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217) + X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218) + X(RM, CTRL_RESERVE_HES, 219) + X(RM, CTRL_RELEASE_HES, 220) + X(RM, CTRL_RESERVE_CCU_PROF, 221) + X(RM, CTRL_RELEASE_CCU_PROF, 222) + X(RM, RESERVED, 223) + X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224) + X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225) + X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226) + X(RM, NUM_FUNCTIONS, 227) +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c new file mode 100644 index 000000000000..6fb3083edde3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include + +#include "nvrm/ofa.h" + +static int +r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst, + struct nvkm_gsp_object *ofa) +{ + NV_OFA_ALLOCATION_PARAMETERS *args; + + args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa); + if (WARN_ON(IS_ERR(args))) + return PTR_ERR(args); + + args->size = sizeof(*args); + args->engineInstance = inst; + + return nvkm_gsp_rm_alloc_wr(ofa, args); +} + +const struct nvkm_rm_api_engine +r570_ofa = { + .alloc = r570_ofa_alloc, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c new file mode 100644 index 000000000000..ad80d8a3d6d3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include + +#include "nvrm/gsp.h" + +static const struct nvkm_rm_wpr +r570_wpr_libos2 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB, +}; + +static const struct nvkm_rm_wpr +r570_wpr_libos3 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, +}; + +static const struct nvkm_rm_api +r570_api = { + .gsp = &r570_gsp, + .rpc = &r535_rpc, + .ctrl = &r535_ctrl, + .alloc = &r535_alloc, + .client = &r570_client, + .device = &r535_device, + .fbsr = &r570_fbsr, + .disp = &r570_disp, + .fifo = &r570_fifo, + .ce = &r535_ce, + .gr = &r570_gr, + .nvdec = &r535_nvdec, + .nvenc = &r535_nvenc, + .nvjpg = &r535_nvjpg, + .ofa = &r570_ofa, +}; + +const struct nvkm_rm_impl +r570_rm_tu102 = { + .wpr = &r570_wpr_libos2, + .api = &r570_api, +}; + +const struct nvkm_rm_impl +r570_rm_ga102 = { + .wpr = &r570_wpr_libos3, + .api = &r570_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 5e9d7351ecc4..fc63ac61a9d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -34,6 +34,7 @@ struct nvkm_rm_api { int (*get_static_info)(struct nvkm_gsp *); bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst); void (*drop_send_user_shared_data)(struct nvkm_gsp *); + void (*drop_post_nocat_record)(struct nvkm_gsp *); u32 (*sr_data_size)(struct nvkm_gsp *); } *gsp; @@ -121,21 +122,39 @@ struct nvkm_rm_api { const struct nvkm_rm_api_gr { int (*get_ctxbufs_info)(struct r535_gr *); + struct { + int (*init)(struct r535_gr *); + void (*fini)(struct r535_gr *); + } scrubber; } *gr; - }; extern const struct nvkm_rm_impl r535_rm_tu102; extern const struct nvkm_rm_impl r535_rm_ga102; extern const struct nvkm_rm_api_gsp r535_gsp; +typedef struct DOD_METHOD_DATA DOD_METHOD_DATA; +typedef struct JT_METHOD_DATA JT_METHOD_DATA; +typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA; +void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *); +void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *); +void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *); +struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; +void r535_gsp_get_static_info_fb(struct nvkm_gsp *, + const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *); extern const struct nvkm_rm_api_rpc r535_rpc; extern const struct nvkm_rm_api_ctrl r535_ctrl; extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; +void r535_gsp_client_dtor(struct nvkm_gsp_client *); extern const struct nvkm_rm_api_device r535_device; +int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle); extern const struct nvkm_rm_api_fbsr r535_fbsr; +void r535_fbsr_resume(struct nvkm_gsp *); +int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target, + u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *); extern const struct nvkm_rm_api_disp r535_disp; extern const struct nvkm_rm_api_fifo r535_fifo; +void r535_fifo_rc_chid(struct nvkm_fifo *, int chid); extern const struct nvkm_rm_api_engine r535_ce; extern const struct nvkm_rm_api_gr r535_gr; void *r535_gr_dtor(struct nvkm_gr *); @@ -143,8 +162,23 @@ int r535_gr_oneinit(struct nvkm_gr *); u64 r535_gr_units(struct nvkm_gr *); int r535_gr_chan_new(struct nvkm_gr *, 
struct nvkm_chan *, const struct nvkm_oclass *, struct nvkm_object **); +int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *, + struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma, + struct nvkm_gsp_object *chan); extern const struct nvkm_rm_api_engine r535_nvdec; extern const struct nvkm_rm_api_engine r535_nvenc; extern const struct nvkm_rm_api_engine r535_nvjpg; extern const struct nvkm_rm_api_engine r535_ofa; + +extern const struct nvkm_rm_impl r570_rm_tu102; +extern const struct nvkm_rm_impl r570_rm_ga102; +extern const struct nvkm_rm_api_gsp r570_gsp; +extern const struct nvkm_rm_api_client r570_client; +extern const struct nvkm_rm_api_fbsr r570_fbsr; +extern const struct nvkm_rm_api_disp r570_disp; +extern const struct nvkm_rm_api_fifo r570_fifo; +extern const struct nvkm_rm_api_gr r570_gr; +int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask); +int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask); +extern const struct nvkm_rm_api_engine r570_ofa; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index b080a8da1caf..97c02aa93d55 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -427,6 +427,7 @@ done: static struct nvkm_gsp_fwif tu102_gsps[] = { + { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" }, { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} @@ -442,3 +443,7 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c index 9e897bdcb647..97eb046c25d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c @@ -41,6 +41,7 @@ tu116_gsp = { static struct nvkm_gsp_fwif tu116_gsps[] = { + { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" }, { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" }, { -1, gv100_gsp_nofw, &gv100_gsp }, {} @@ -55,3 +56,6 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01); NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01); + +NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144); +NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144); -- cgit v1.2.3 From 2f89bb3264af414a96ef875035c6c5afae01c7da Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 31 Jan 2025 01:56:33 +1000 Subject: drm/nouveau/pci: add PRI address of config space mirror to nvkm_pci_func These registers have moved on GH100/GBxxx, and the GSP-RM init code uses hardcoded values from earlier GPUs to fill GspSystemInfo. Replace the per-GPU accessors in nvkm_pci_func with region info, and use it when initialising GspSystemInfo. 
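For illustration, a minimal sketch of the accessor pattern this commit moves to: once the config-space mirror is described only by a PRI base/size pair, one generic read helper serves every GPU family instead of per-generation rd32/wr08/wr32 hooks, and the same fields can be handed to GSP-RM as shown in the hunks that follow. Everything prefixed example_ below, plus the mmio_rd32() stub, is invented for this sketch; only the 0x088000/0x1000 window values come from the patch itself.

#include <stdint.h>
#include <stdio.h>

struct example_pci_func {
        /* PRI window that mirrors PCI config space */
        struct {
                uint32_t addr;
                uint16_t size;
        } cfg;
};

/* stand-in for nvkm_rd32(); a real driver would read the MMIO register */
static uint32_t mmio_rd32(uint32_t pri_addr)
{
        return pri_addr;
}

/* one generic accessor: config-space offset is applied relative to cfg.addr */
static uint32_t example_pci_rd32(const struct example_pci_func *f, uint16_t offset)
{
        return mmio_rd32(f->cfg.addr + offset);
}

int main(void)
{
        /* pre-GH100 parts mirror config space at PRI 0x088000 (per the patch) */
        const struct example_pci_func pci = { .cfg = { .addr = 0x088000, .size = 0x1000 } };

        printf("config dword 0 is read from PRI 0x%06x\n", (unsigned)(pci.cfg.addr + 0x0));
        printf("stub mirror read returns 0x%08x\n", (unsigned)example_pci_rd32(&pci, 0x0));
        return 0;
}

On GH100/GBxxx the relocated mirror then only needs a different addr/size pair in the per-chip nvkm_pci_func; no accessor code changes, which is the point of replacing the function pointers with data.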
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 5 +++-- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c | 5 +++-- drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 10 ++++----- drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c | 5 ++--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c | 5 ++--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c | 5 ++--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c | 5 ++--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c | 5 ++--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c | 5 ++--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c | 4 +--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c | 25 +--------------------- drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c | 25 +--------------------- drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c | 4 +--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c | 4 +--- drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h | 11 +++++----- 15 files changed, 33 insertions(+), 90 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index ce3d4dd49ac8..e2171d0d25be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -24,6 +24,7 @@ #include "priv.h" #include +#include #include #include #include @@ -905,8 +906,8 @@ r535_gsp_set_system_info(struct nvkm_gsp *gsp) info->gpuPhysInstAddr = device->func->resource_addr(device, 3); info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); info->maxUserVa = TASK_SIZE; - info->pciConfigMirrorBase = 0x088000; - info->pciConfigMirrorSize = 0x001000; + info->pciConfigMirrorBase = device->pci->func->cfg.addr; + info->pciConfigMirrorSize = device->pci->func->cfg.size; r535_gsp_acpi_info(gsp, &info->acpiMethodData); return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c index 55795c49371f..a3c070d41923 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c @@ -12,6 +12,7 @@ #include "nvrm/msgfn.h" #include +#include static u32 r570_gsp_sr_data_size(struct nvkm_gsp *gsp) @@ -157,8 +158,8 @@ r570_gsp_set_system_info(struct nvkm_gsp *gsp) info->gpuPhysInstAddr = device->func->resource_addr(device, 3); info->nvDomainBusDeviceFunc = pci_dev_id(pdev); info->maxUserVa = TASK_SIZE; - info->pciConfigMirrorBase = 0x088000; - info->pciConfigMirrorSize = 0x001000; + info->pciConfigMirrorBase = device->pci->func->cfg.addr; + info->pciConfigMirrorSize = device->pci->func->cfg.size; info->PCIDeviceID = (pdev->device << 16) | pdev->vendor; info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor; info->PCIRevisionID = pdev->revision; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index 5a0de45d36ce..6867934256a7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -39,26 +39,26 @@ nvkm_pci_msi_rearm(struct nvkm_device *device) u32 nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr) { - return pci->func->rd32(pci, addr); + return nvkm_rd32(pci->subdev.device, pci->func->cfg.addr + addr); } void nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data) { - pci->func->wr08(pci, addr, data); + 
nvkm_wr08(pci->subdev.device, pci->func->cfg.addr + addr, data); } void nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data) { - pci->func->wr32(pci, addr, data); + nvkm_wr32(pci->subdev.device, pci->func->cfg.addr + addr, data); } u32 nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value) { - u32 data = pci->func->rd32(pci, addr); - pci->func->wr32(pci, addr, (data & ~mask) | value); + u32 data = nvkm_pci_rd32(pci, addr); + nvkm_pci_wr32(pci, addr, (data & ~mask) | value); return data; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c index 5b29aacedef3..5308f6539a3f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c @@ -132,10 +132,9 @@ g84_pcie_init(struct nvkm_pci *pci) static const struct nvkm_pci_func g84_pci_func = { + .cfg = { .addr = 0x088000, .size = 0x1000 }, + .init = g84_pci_init, - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, .msi_rearm = nv46_pci_msi_rearm, .pcie.init = g84_pcie_init, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c index a9e0674009c6..8ae7aa02e675 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c @@ -33,10 +33,9 @@ g92_pcie_version_supported(struct nvkm_pci *pci) static const struct nvkm_pci_func g92_pci_func = { + .cfg = { .addr = 0x088000, .size = 0x1000 }, + .init = g84_pci_init, - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, .msi_rearm = nv46_pci_msi_rearm, .pcie.init = g84_pcie_init, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c index 7bacd0693283..df745d0690ca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c @@ -25,10 +25,9 @@ static const struct nvkm_pci_func g94_pci_func = { + .cfg = { .addr = 0x088000, .size = 0x1000 }, + .init = g84_pci_init, - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, .msi_rearm = nv40_pci_msi_rearm, .pcie.init = g84_pcie_init, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c index 099906092fe1..6ce941df87b7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c @@ -78,10 +78,9 @@ gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width) static const struct nvkm_pci_func gf100_pci_func = { + .cfg = { .addr = 0x088000, .size = 0x1000 }, + .init = g84_pci_init, - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, .msi_rearm = gf100_pci_msi_rearm, .pcie.init = gf100_pcie_init, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c index bcde609ba866..712ca7e0959a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c @@ -25,10 +25,9 @@ static const struct nvkm_pci_func gf106_pci_func = { + .cfg = { .addr = 0x088000, .size = 0x1000 }, + .init = g84_pci_init, - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, .msi_rearm = nv40_pci_msi_rearm, .pcie.init = gf100_pcie_init, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c index 6be87ecffc89..ec6d0a7de995 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c @@ -204,10 +204,9 @@ gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width) static const struct nvkm_pci_func gk104_pci_func = { + .cfg = { .addr = 0x088000, .size = 0x1000 }, + .init = g84_pci_init, - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, .msi_rearm = nv40_pci_msi_rearm, .pcie.init = gk104_pcie_init, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c index a5fafda0014d..4204316a544f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c @@ -31,9 +31,7 @@ gp100_pci_msi_rearm(struct nvkm_pci *pci) static const struct nvkm_pci_func gp100_pci_func = { - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, + .cfg = { .addr = 0x088000, .size = 0x1000 }, .msi_rearm = gp100_pci_msi_rearm, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c index 9ab64194b185..b8a3f6850fa7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c @@ -23,32 +23,9 @@ */ #include "priv.h" -static u32 -nv04_pci_rd32(struct nvkm_pci *pci, u16 addr) -{ - struct nvkm_device *device = pci->subdev.device; - return nvkm_rd32(device, 0x001800 + addr); -} - -static void -nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data) -{ - struct nvkm_device *device = pci->subdev.device; - nvkm_wr08(device, 0x001800 + addr, data); -} - -static void -nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data) -{ - struct nvkm_device *device = pci->subdev.device; - nvkm_wr32(device, 0x001800 + addr, data); -} - static const struct nvkm_pci_func nv04_pci_func = { - .rd32 = nv04_pci_rd32, - .wr08 = nv04_pci_wr08, - .wr32 = nv04_pci_wr32, + .cfg = { .addr = 0x001800, .size = 0x1000 }, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c index 6a3c31cf0200..1971dbbdeb2b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c @@ -23,27 +23,6 @@ */ #include "priv.h" -u32 -nv40_pci_rd32(struct nvkm_pci *pci, u16 addr) -{ - struct nvkm_device *device = pci->subdev.device; - return nvkm_rd32(device, 0x088000 + addr); -} - -void -nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data) -{ - struct nvkm_device *device = pci->subdev.device; - nvkm_wr08(device, 0x088000 + addr, data); -} - -void -nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data) -{ - struct nvkm_device *device = pci->subdev.device; - nvkm_wr32(device, 0x088000 + addr, data); -} - void nv40_pci_msi_rearm(struct nvkm_pci *pci) { @@ -52,9 +31,7 @@ nv40_pci_msi_rearm(struct nvkm_pci *pci) static const struct nvkm_pci_func nv40_pci_func = { - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, + .cfg = { .addr = 0x088000, .size = 0x1000 }, .msi_rearm = nv40_pci_msi_rearm, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c index 9cad17f178ec..0093eabac9ae 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c @@ -38,9 +38,7 @@ nv46_pci_msi_rearm(struct nvkm_pci *pci) static const struct nvkm_pci_func nv46_pci_func = { - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, + .cfg = { .addr = 0x088000, .size = 0x1000 }, .msi_rearm = nv46_pci_msi_rearm, }; 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c index 741e34bf307c..b445081bb80e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c @@ -25,9 +25,7 @@ static const struct nvkm_pci_func nv4c_pci_func = { - .rd32 = nv40_pci_rd32, - .wr08 = nv40_pci_wr08, - .wr32 = nv40_pci_wr32, + .cfg = { .addr = 0x088000, .size = 0x1000 }, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h index 9b7583532962..988eeee1471c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h @@ -8,10 +8,12 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_ struct nvkm_pci **); struct nvkm_pci_func { + struct { + u32 addr; + u16 size; + } cfg; + void (*init)(struct nvkm_pci *); - u32 (*rd32)(struct nvkm_pci *, u16 addr); - void (*wr08)(struct nvkm_pci *, u16 addr, u8 data); - void (*wr32)(struct nvkm_pci *, u16 addr, u32 data); void (*msi_rearm)(struct nvkm_pci *); struct { @@ -27,9 +29,6 @@ struct nvkm_pci_func { } pcie; }; -u32 nv40_pci_rd32(struct nvkm_pci *, u16); -void nv40_pci_wr08(struct nvkm_pci *, u16, u8); -void nv40_pci_wr32(struct nvkm_pci *, u16, u32); void nv40_pci_msi_rearm(struct nvkm_pci *); void nv46_pci_msi_rearm(struct nvkm_pci *); -- cgit v1.2.3 From 0adfd612c02f57c55ceac63a23baa702249fc612 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 30 Jan 2025 17:28:02 +1000 Subject: drm/nouveau/instmem: add hal for set_bar0_window_addr() GH100/GBxxx have moved the register that controls where in VRAM the BAR0 NV_PRAMIN window points. Add a HAL for this, as the BAR0 window is needed for BAR2 bootstrap.
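For context, a rough sketch of what the new hook abstracts, assuming the pre-GH100 behaviour shown in the nv50 instmem hunk below: the BAR0 window at PRI 0x700000 is retargeted by writing the VRAM address, in 64KiB units, to register 0x001700, and data is then accessed through the window. The wr32()/rd32() stubs and the 1 MiB window size are assumptions of this sketch, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define PRAMIN_BASE 0x700000u   /* BAR0 window into VRAM (from the diff) */
#define PRAMIN_SIZE 0x100000u   /* window size assumed to be 1 MiB here */

/* stand-ins for nvkm_wr32()/nvkm_rd32() */
static void wr32(uint32_t pri, uint32_t data)
{
        printf("wr32(0x%06x, 0x%08x)\n", (unsigned)pri, (unsigned)data);
}

static uint32_t rd32(uint32_t pri)
{
        printf("rd32(0x%06x)\n", (unsigned)pri);
        return 0;
}

/* pre-GH100 HAL implementation: window base is programmed in 64KiB units */
static void set_bar0_window_addr(uint64_t vram_addr)
{
        wr32(0x001700, (uint32_t)(vram_addr >> 16));
}

/* read one VRAM word through the window, retargeting the window first */
static uint32_t pramin_rd32(uint64_t vram_addr)
{
        set_bar0_window_addr(vram_addr & ~(uint64_t)(PRAMIN_SIZE - 1));
        return rd32(PRAMIN_BASE + (uint32_t)(vram_addr & (PRAMIN_SIZE - 1)));
}

int main(void)
{
        /* e.g. peek at the start of a page directory placed at 256 MiB */
        (void)pramin_rd32(0x10000000ull);
        return 0;
}

On GH100/GBxxx only the register write inside set_bar0_window_addr() has to change; the slow-path readers and writers stay the same, which is why it becomes a per-chip hook.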
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 11 +++++++++-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h | 1 + 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c index 1976d0030d17..150e22fde2ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c @@ -317,6 +317,7 @@ r535_instmem_new(const struct nvkm_instmem_func *hw, rm->memory_new = hw->memory_new; rm->memory_wrap = hw->memory_wrap; rm->zero = false; + rm->set_bar0_window_addr = hw->set_bar0_window_addr; ret = nv50_instmem_new_(rm, device, type, inst, pinstmem); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index dd5b5a17ece0..0ef66d7d5e51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -65,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) spin_lock_irqsave(&imem->base.lock, flags); if (unlikely(imem->addr != base)) { - nvkm_wr32(device, 0x001700, base >> 16); + imem->base.func->set_bar0_window_addr(device, base); imem->addr = base; } nvkm_wr32(device, 0x700000 + addr, data); @@ -85,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) spin_lock_irqsave(&imem->base.lock, flags); if (unlikely(imem->addr != base)) { - nvkm_wr32(device, 0x001700, base >> 16); + imem->base.func->set_bar0_window_addr(device, base); imem->addr = base; } data = nvkm_rd32(device, 0x700000 + addr); @@ -394,6 +394,12 @@ nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, * instmem subdev implementation *****************************************************************************/ +static void +nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr) +{ + nvkm_wr32(device, 0x001700, addr >> 16); +} + static void nv50_instmem_fini(struct nvkm_instmem *base) { @@ -415,6 +421,7 @@ nv50_instmem = { .memory_new = nv50_instobj_new, .memory_wrap = nv50_instobj_wrap, .zero = false, + .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h index 4c14c96fb60a..d5b5fcd9262b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -16,6 +16,7 @@ struct nvkm_instmem_func { bool zero, struct nvkm_memory **); int (*memory_wrap)(struct nvkm_instmem *, struct nvkm_memory *, struct nvkm_memory **); bool zero; + void (*set_bar0_window_addr)(struct nvkm_device *, u64 addr); }; int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *, -- cgit v1.2.3 From 82df73d8ee007f62616fb6eb2e6bf6dc3b32c573 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 13 Feb 2025 07:42:28 +1000 Subject: drm/nouveau/mmu: bump up the maximum page table depth GH100/GBxxx have 6-level page tables. 
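A small, self-contained illustration of why the nvkm_vma.size field is widened in the mmu.h hunk just below, alongside the depth bump: deeper page tables allow a larger virtual address space, so a single range can presumably exceed 2^50 bytes, and a 50-bit bitfield would silently truncate it. The structs here are toy models, not the real nvkm_vma.

#include <stdint.h>
#include <stdio.h>

/* 64-bit bitfields are a GCC/Clang extension, as used by the kernel's u64 size:50 */
struct vma_old { uint64_t size : 50; };   /* mirrors the old "u64 size:50" */
struct vma_new { uint64_t size; };        /* mirrors the new plain u64 */

int main(void)
{
        uint64_t range = 1ULL << 52;      /* a 4 PiB range, larger than 2^50 */
        struct vma_old o = { .size = range };
        struct vma_new n = { .size = range };

        /* the unsigned bitfield wraps modulo 2^50 and ends up as 0 */
        printf("50-bit field: 0x%llx\n", (unsigned long long)o.size);
        printf("full u64:     0x%llx\n", (unsigned long long)n.size);
        return 0;
}

The NVKM_VMM_LEVELS_MAX bump in vmm.c is the companion change: the sixth level simply adds one more page-directory walk on GH100/GBxxx.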
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 935b1cacd528..7188e3eb2d07 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -8,7 +8,7 @@ struct nvkm_vma { struct list_head head; struct rb_node tree; u64 addr; - u64 size:50; + u64 size; bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */ bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */ #define NVKM_VMA_PAGE_NONE 7 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 9c97800fe037..b54397e5364c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -19,7 +19,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ -#define NVKM_VMM_LEVELS_MAX 5 +#define NVKM_VMM_LEVELS_MAX 6 #include "vmm.h" #include -- cgit v1.2.3 From 708d81a9f529a1bfdec62021690d03ae4ee2e964 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 13 Feb 2025 18:17:53 +1000 Subject: drm/nouveau/gsp: fetch level shift and PDE from BAR2 VMM When mirroring BAR2 page tables to RM, we need to know the level shift for the root page table (which is currently hardcoded), as well as the raw PDE value (which is currently hardcoded in GP1xx-AD1xx format). In order to support GH100/GBxxx, modify the code to determine the page shift from per-GPU info in nvkm_vmm_page, as well as read the relevant PDE back from the root page table rather than recalculating it. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c | 23 ++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c index b8fb8150ae48..91242f09648e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c @@ -50,7 +50,7 @@ r535_bar_bar2_wait(struct nvkm_bar *base) } static int -r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) +r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe) { rpc_update_bar_pde_v15_00 *rpc; @@ -59,8 +59,8 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) return -EIO; rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2; - rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ - rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! 
+ rpc->info.entryValue = pdbe; + rpc->info.entryLevelShift = page_shift; return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } @@ -68,12 +68,13 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) static void r535_bar_bar2_fini(struct nvkm_bar *bar) { + struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; struct nvkm_gsp *gsp = bar->subdev.device->gsp; bar->flushBAR2 = bar->flushBAR2PhysMode; nvkm_done(bar->flushFBZero); - WARN_ON(r535_bar_bar2_update_pde(gsp, 0)); + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0)); } static void @@ -82,8 +83,18 @@ r535_bar_bar2_init(struct nvkm_bar *bar) struct nvkm_device *device = bar->subdev.device; struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm; struct nvkm_gsp *gsp = device->gsp; - - WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr)); + struct nvkm_memory *pdb = vmm->pd->pt[0]->memory; + u32 pdb_offset = vmm->pd->pt[0]->base; + u32 pdbe_lo, pdbe_hi; + u64 pdbe; + + nvkm_kmap(pdb); + pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0); + pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4); + pdbe = ((u64)pdbe_hi << 32) | pdbe_lo; + nvkm_done(pdb); + + WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe)); vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb; if (!bar->flushFBZero) { -- cgit v1.2.3 From bc7849720b5275297b58db73a20d9a15dda5f353 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 8 May 2025 10:02:49 +1000 Subject: drm/nouveau/gsp: init client VMMs with NV0080_CTRL_DMA_SET_PAGE_DIRECTORY The current code using NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES not only requires changes to support the new page table layout used on Hopper/Blackwell GPUs, but is also broken in that it always mirrors the PDEs used for virtual address 0, rather than the area reserved for RM. This works fine for the non-NVK case where the kernel has full control of the VMM layout and things end up in the right place, but NVK puts its kernel reserved area much higher in the address space. Fixing the code to work at any VA is not enough as some parts of RM want the reserved area in a specific location, and NVK would then hit other assertions in RM instead. Fortunately, it appears that RM never needs to allocate anything within its reserved area for DRM clients, and the COPY_SERVER_RESERVED_PDES control call primarily serves to allow RM to locate the root page table when initialising a channel's instance block. Flag VMMs allocated by the DRM driver as externally owned, and use NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY to inform RM of the root page table in a similar way to NVIDIA's UVM driver. The COPY_SERVER_RESERVED_PDES paths are kept for the golden context image and gr scrubber channel, where RM needs the reserved area. 
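As a rough summary of the split this commit introduces (the authoritative version is in the r535/vmm.c hunks below), the choice between the two RM calls reduces to whether the VA space is externally owned. The helper and enum here are hypothetical, written only to restate that decision.

#include <stdbool.h>
#include <stdio.h>

enum vaspace_setup {
        USE_SET_PAGE_DIRECTORY,           /* externally owned: just hand RM the root PD */
        USE_COPY_SERVER_RESERVED_PDES,    /* RM-internal: let RM mirror its reserved PDEs */
};

/* hypothetical helper, not part of the patch */
static enum vaspace_setup pick_vaspace_setup(bool externally_owned)
{
        /*
         * DRM clients (e.g. NVK) control their own VA layout, so RM only
         * needs the physical address of the root page directory.
         */
        if (externally_owned)
                return USE_SET_PAGE_DIRECTORY;

        /*
         * The golden context image and the gr scrubber channel still rely
         * on RM placing allocations in its server-reserved area, so the
         * PDEs covering that area are copied to RM as before.
         */
        return USE_COPY_SERVER_RESERVED_PDES;
}

int main(void)
{
        printf("client vmm -> %d\n", pick_vaspace_setup(true));
        printf("golden ctx -> %d\n", pick_vaspace_setup(false));
        return 0;
}

This mirrors the way NVIDIA's UVM driver registers a page directory with RM, as the commit message notes.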
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 1 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c | 3 +- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h | 42 ++++++++++++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 66 +++++++++++++++++++--- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 3 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 8 +-- 7 files changed, 106 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 7188e3eb2d07..4cab139f3236 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -73,6 +73,7 @@ struct nvkm_vmm { struct nvkm_gsp_object object; struct nvkm_vma *rsvd; + bool external; } rm; }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c index 1f5cf21f3f61..ddb57d5e73d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c @@ -276,7 +276,6 @@ r535_gr_oneinit(struct nvkm_gr *base) struct nvkm_device *device = subdev->device; struct nvkm_gsp *gsp = device->gsp; struct nvkm_rm *rm = gsp->rm; - struct nvkm_mmu *mmu = device->mmu; struct { struct nvkm_memory *inst; struct nvkm_vmm *vmm; @@ -295,7 +294,7 @@ r535_gr_oneinit(struct nvkm_gr *base) if (ret) goto done; - ret = mmu->func->promote_vmm(golden.vmm); + ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h index f58edf62e4ae..f6ec04efd119 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h @@ -23,6 +23,11 @@ typedef struct #define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 // +#include #include "nvrm/vmm.h" +void +r535_mmu_vaspace_del(struct nvkm_vmm *vmm) +{ + if (vmm->rm.external) { + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object, + NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY, + sizeof(*ctrl)); + if (!IS_ERR(ctrl)) { + ctrl->hVASpace = vmm->rm.object.handle; + + WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl)); + } + + vmm->rm.external = false; + } + + nvkm_gsp_rm_free(&vmm->rm.object); + nvkm_gsp_device_dtor(&vmm->rm.device); + nvkm_gsp_client_dtor(&vmm->rm.client); + + nvkm_vmm_put(vmm, &vmm->rm.rsvd); +} + int -r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle) +r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external) { NV_VASPACE_ALLOCATION_PARAMETERS *args; int ret; @@ -40,12 +66,14 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle) return PTR_ERR(args); args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + if (external) + args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED; ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args); if (ret) return ret; - { + if (!external) { NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; mutex_lock(&vmm->mutex.vmm); @@ -55,6 +83,11 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle) if (ret) return ret; + /* Some parts of RM expect the server-reserved area to be in a specific location. 
*/ + if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START || + vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE)) + return -EINVAL; + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object, NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES, sizeof(*ctrl)); @@ -73,14 +106,29 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle) ctrl->levels[1].size = 0x1000; ctrl->levels[1].aperture = 1; ctrl->levels[1].pageShift = 0x26; - if (vmm->pd->pde[0]->pde[0]) { - ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; - ctrl->levels[2].size = 0x1000; - ctrl->levels[2].aperture = 1; - ctrl->levels[2].pageShift = 0x1d; - } + ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; + ctrl->levels[2].size = 0x1000; + ctrl->levels[2].aperture = 1; + ctrl->levels[2].pageShift = 0x1d; ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); + } else { + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl; + + ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object, + NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY, + sizeof(*ctrl)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); + + ctrl->physAddress = vmm->pd->pt[0]->addr; + ctrl->numEntries = 1 << vmm->func->page[0].desc->bits; + ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM); + ctrl->hVASpace = vmm->rm.object.handle; + + ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl); + if (ret == 0) + vmm->rm.external = true; } return ret; @@ -89,7 +137,7 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle) static int r535_mmu_promote_vmm(struct nvkm_vmm *vmm) { - return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE); + return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c index c92ec231f09a..b6cced9b8aa1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c @@ -126,7 +126,7 @@ r570_gr_scrubber_init(struct r535_gr *gr) if (ret) goto done; - ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS); + ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index fc63ac61a9d1..ead48c106bb6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -147,7 +147,8 @@ extern const struct nvkm_rm_api_alloc r535_alloc; extern const struct nvkm_rm_api_client r535_client; void r535_gsp_client_dtor(struct nvkm_gsp_client *); extern const struct nvkm_rm_api_device r535_device; -int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle); +int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external); +void r535_mmu_vaspace_del(struct nvkm_vmm *); extern const struct nvkm_rm_api_fbsr r535_fbsr; void r535_fbsr_resume(struct nvkm_gsp *); int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index b54397e5364c..f95c58b67633 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -1030,12 +1030,8 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm) struct nvkm_vma *vma; struct rb_node *node; - if (vmm->rm.client.gsp) { - nvkm_gsp_rm_free(&vmm->rm.object); - nvkm_gsp_device_dtor(&vmm->rm.device); - 
nvkm_gsp_client_dtor(&vmm->rm.client); - nvkm_vmm_put(vmm, &vmm->rm.rsvd); - } + if (vmm->rm.client.gsp) + r535_mmu_vaspace_del(vmm); if (0) nvkm_vmm_dump(vmm); -- cgit v1.2.3 From 6c3ac7bcfcff1409c6b29dd074332960d30acc29 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 9 May 2025 05:21:17 +1000 Subject: drm/nouveau/gsp: support deeper page tables in COPY_SERVER_RESERVED_PDES Use data from 'struct nvkm_vmm_page/desc' to determine which PDEs need to be mirrored to RM instead of hardcoded values for pre-Hopper page tables. Needed to support Hopper/Blackwell. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c | 47 +++++++++++++++------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c index dbddc5cc333d..52f2e5f14517 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -75,9 +75,22 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external) if (!external) { NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl; + u8 page_shift = 29; /* 512MiB */ + const u64 page_size = BIT_ULL(page_shift); + const struct nvkm_vmm_page *page; + const struct nvkm_vmm_desc *desc; + struct nvkm_vmm_pt *pd = vmm->pd; + + for (page = vmm->func->page; page->shift; page++) { + if (page->shift == page_shift) + break; + } + + if (WARN_ON(!page->shift)) + return -EINVAL; mutex_lock(&vmm->mutex.vmm); - ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000, + ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size, &vmm->rm.rsvd); mutex_unlock(&vmm->mutex.vmm); if (ret) @@ -94,22 +107,26 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external) if (IS_ERR(ctrl)) return PTR_ERR(ctrl); - ctrl->pageSize = 0x20000000; + ctrl->pageSize = page_size; ctrl->virtAddrLo = vmm->rm.rsvd->addr; ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1; - ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2; - ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr; - ctrl->levels[0].size = 0x20; - ctrl->levels[0].aperture = 1; - ctrl->levels[0].pageShift = 0x2f; - ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr; - ctrl->levels[1].size = 0x1000; - ctrl->levels[1].aperture = 1; - ctrl->levels[1].pageShift = 0x26; - ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr; - ctrl->levels[2].size = 0x1000; - ctrl->levels[2].aperture = 1; - ctrl->levels[2].pageShift = 0x1d; + + for (desc = page->desc; desc->bits; desc++) { + ctrl->numLevelsToCopy++; + page_shift += desc->bits; + } + desc--; + + for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) { + page_shift -= desc->bits; + + ctrl->levels[i].physAddress = pd->pt[0]->addr; + ctrl->levels[i].size = (1 << desc->bits) * desc->size; + ctrl->levels[i].aperture = 1; + ctrl->levels[i].pageShift = page_shift; + + pd = pd->pde[0]; + } ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl); } else { -- cgit v1.2.3 From b1ca384772b657df433acf0c36f0771d5ebe1138 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 7 Feb 2025 14:16:11 +1000 Subject: drm/nouveau/gv100-: switch to volta semaphore methods HOPPER_CHANNEL_GPFIFO_A removes the SEMAPHORE[A-D] methods that are currently used by nouveau to implement fences on GF100 and newer. 
Switch to the newer SEM methods available from VOLTA_CHANNEL_GPFIFO, which are also available on the Hopper/Blackwell host classes. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/Kbuild | 1 + drivers/gpu/drm/nouveau/gv100_fence.c | 93 ++++++++++++++++++++++ .../gpu/drm/nouveau/include/nvhw/class/clc36f.h | 52 ++++++++++++ drivers/gpu/drm/nouveau/include/nvif/push906f.h | 1 + drivers/gpu/drm/nouveau/nouveau_drm.c | 4 +- drivers/gpu/drm/nouveau/nouveau_fence.h | 1 + 6 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/nouveau/gv100_fence.c create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild index 0759ba15954b..385d24530d1e 100644 --- a/drivers/gpu/drm/nouveau/Kbuild +++ b/drivers/gpu/drm/nouveau/Kbuild @@ -69,5 +69,6 @@ nouveau-y += nv17_fence.o nouveau-y += nv50_fence.o nouveau-y += nv84_fence.o nouveau-y += nvc0_fence.o +nouveau-y += gv100_fence.o obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c new file mode 100644 index 000000000000..cccdeca72002 --- /dev/null +++ b/drivers/gpu/drm/nouveau/gv100_fence.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" + +#include "nv50_display.h" + +#include + +#include + +static int +gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence) +{ + struct nvif_push *push = &chan->chan.push; + int ret; + + ret = PUSH_WAIT(push, 8); + if (ret) + return ret; + + PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual), + SEM_ADDR_HI, upper_32_bits(virtual), + SEM_PAYLOAD_LO, sequence); + + PUSH_MTHD(push, NVC36F, SEM_EXECUTE, + NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) | + NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, EN) | + NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) | + NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS)); + + PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0); + + PUSH_KICK(push); + return 0; +} + +static int +gv100_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence) +{ + struct nvif_push *push = &chan->chan.push; + int ret; + + ret = PUSH_WAIT(push, 6); + if (ret) + return ret; + + PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual), + SEM_ADDR_HI, upper_32_bits(virtual), + SEM_PAYLOAD_LO, sequence); + + PUSH_MTHD(push, NVC36F, SEM_EXECUTE, + NVDEF(NVC36F, SEM_EXECUTE, OPERATION, ACQ_CIRC_GEQ) | + NVDEF(NVC36F, SEM_EXECUTE, ACQUIRE_SWITCH_TSG, EN) | + NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT)); + + PUSH_KICK(push); + return 0; +} + +static int +gv100_fence_context_new(struct nouveau_channel *chan) +{ + struct nv84_fence_chan *fctx; + int ret; + + ret = nv84_fence_context_new(chan); + if (ret) + return ret; + + fctx = chan->fence; + fctx->base.emit32 = gv100_fence_emit32; + fctx->base.sync32 = gv100_fence_sync32; + return 0; +} + +int +gv100_fence_create(struct nouveau_drm *drm) +{ + struct nv84_fence_priv *priv; + int ret; + + ret = nv84_fence_create(drm); + if (ret) + return ret; + + priv = drm->fence; + priv->base.context_new = gv100_fence_context_new; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h new file mode 100644 index 
000000000000..8735dda4c8a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef _clc36f_h_ +#define _clc36f_h_ + +#define NVC36F_NON_STALL_INTERRUPT (0x00000020) +#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC36F_SEM_ADDR_LO (0x0000005c) +#define NVC36F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC36F_SEM_ADDR_HI (0x00000060) +#define NVC36F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC36F_SEM_PAYLOAD_LO (0x00000064) +#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC36F_SEM_PAYLOAD_HI (0x00000068) +#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC36F_SEM_EXECUTE (0x0000006c) +#define NVC36F_SEM_EXECUTE_OPERATION 2:0 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 + +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h index cc2866bc8b0a..79df71de98d2 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/push906f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/push906f.h @@ -7,6 +7,7 @@ #ifndef PUSH906F_SUBC // Host methods #define PUSH906F_SUBC_NV906F 0 +#define PUSH906F_SUBC_NVC36F 0 // Twod #define PUSH906F_SUBC_NV902D 3 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index c69139701056..e7544942791d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -503,11 +503,13 @@ nouveau_accel_init(struct nouveau_drm *drm) case KEPLER_CHANNEL_GPFIFO_B: case MAXWELL_CHANNEL_GPFIFO_A: case PASCAL_CHANNEL_GPFIFO_A: + ret = nvc0_fence_create(drm); + break; case VOLTA_CHANNEL_GPFIFO_A: case TURING_CHANNEL_GPFIFO_A: case AMPERE_CHANNEL_GPFIFO_A: case AMPERE_CHANNEL_GPFIFO_B: - ret = nvc0_fence_create(drm); + ret = gv100_fence_create(drm); break; 
default: break; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 8bc065acfe35..6a983dd9f7b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -83,6 +83,7 @@ void nv17_fence_resume(struct nouveau_drm *drm); int nv50_fence_create(struct nouveau_drm *); int nv84_fence_create(struct nouveau_drm *); int nvc0_fence_create(struct nouveau_drm *); +int gv100_fence_create(struct nouveau_drm *); struct nv84_fence_chan { struct nouveau_fence_chan base; -- cgit v1.2.3 From 76b8f81a5b928cfb81d0c1477ab9be1e7d03660c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 21 Mar 2025 13:35:17 +1000 Subject: drm/nouveau: improve handling of 64-bit BARs GPUs exist now with a 64-bit BAR0, which mean that BAR1 and BAR2's indices (as passed to pci_resource_len() etc) are bumped up by one. Modify nvkm_device.resource_addr/size() to take an enum instead of an integer bar index, and take IORESOURCE_MEM_64 into account when translating to the "raw" bar id. [airlied: fixup ERR_PTR] Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 11 ++++++-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 2 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +-- drivers/gpu/drm/nouveau/nouveau_chan.c | 4 ++- drivers/gpu/drm/nouveau/nouveau_ttm.c | 12 ++++---- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 4 +-- drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 32 +++++++++++++++++++--- drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 18 +++++++++--- drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 4 +-- drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 6 ++-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 4 +-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c | 1 - drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c | 2 +- drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | 14 +++++----- drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c | 4 +-- .../gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h | 4 +-- drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c | 2 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c | 2 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 6 ++-- .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c | 6 ++-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c | 10 ++----- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c | 2 +- 36 files changed, 109 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 46afb877a296..f50f52d4dc3f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ 
b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -77,6 +77,13 @@ struct nvkm_device { struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst); struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst); +enum nvkm_bar_id { + NVKM_BAR_INVALID = 0, + NVKM_BAR0_PRI, + NVKM_BAR1_FB, + NVKM_BAR2_INST, +}; + struct nvkm_device_func { struct nvkm_device_pci *(*pci)(struct nvkm_device *); struct nvkm_device_tegra *(*tegra)(struct nvkm_device *); @@ -85,8 +92,8 @@ struct nvkm_device_func { int (*init)(struct nvkm_device *); void (*fini)(struct nvkm_device *, bool suspend); int (*irq)(struct nvkm_device *); - resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar); - resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar); + resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id); + resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id); bool cpu_coherent; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 2a0617e5fe2a..4c100005ef81 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -315,7 +315,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) break; } case NOUVEAU_GETPARAM_VRAM_BAR_SIZE: - getparam->value = nvkm_device->func->resource_size(nvkm_device, 1); + getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB); break; case NOUVEAU_GETPARAM_VRAM_USED: { struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2016c1e7242f..9ab8380feb39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1204,7 +1204,7 @@ retry: fallthrough; /* tiled memory */ case TTM_PL_VRAM: reg->bus.offset = (reg->start << PAGE_SHIFT) + - device->func->resource_addr(device, 1); + device->func->resource_addr(device, NVKM_BAR1_FB); reg->bus.is_iomem = true; /* Some BARs do not support being ioremapped WC */ @@ -1295,7 +1295,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); struct nvkm_device *device = nvxx_device(drm); - u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; + u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT; int i, ret; /* as long as the bo isn't in vram, and isn't tiled, we've got diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 1286a664f688..5bcd29809c1e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -209,13 +209,15 @@ nouveau_channel_prep(struct nouveau_cli *cli, } else if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) { if (device->info.family == NV_DEVICE_INFO_V0_TNT) { + struct nvkm_device *nvkm_device = nvxx_device(drm); + /* nv04 vram pushbuf hack, retarget to its location in * the framebuffer bar rather than direct vram access.. * nfi why this exists, it came from the -nv ddx. 
*/ args.target = NV_DMA_V0_TARGET_PCI; args.access = NV_DMA_V0_ACCESS_RDWR; - args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1); + args.start = nvkm_device->func->resource_addr(nvkm_device, NVKM_BAR1_FB); args.limit = args.start + device->info.ram_user - 1; } else { args.target = NV_DMA_V0_TARGET_VRAM; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index e244927eb5d4..7d2436e5d50d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -312,8 +312,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) /* VRAM init */ drm->gem.vram_available = drm->client.device.info.ram_user; - arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1), - device->func->resource_size(device, 1)); + arch_io_reserve_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB), + device->func->resource_size(device, NVKM_BAR1_FB)); ret = nouveau_ttm_init_vram(drm); if (ret) { @@ -321,8 +321,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) return ret; } - drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1), - device->func->resource_size(device, 1)); + drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, NVKM_BAR1_FB), + device->func->resource_size(device, NVKM_BAR1_FB)); /* GART init */ if (!drm->agp.bridge) { @@ -357,7 +357,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm) arch_phys_wc_del(drm->ttm.mtrr); drm->ttm.mtrr = 0; - arch_io_free_memtype_wc(device->func->resource_addr(device, 1), - device->func->resource_size(device, 1)); + arch_io_free_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB), + device->func->resource_size(device, NVKM_BAR1_FB)); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 0cd20d0f8782..ebcaf2ecff48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -3027,8 +3027,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func, device->debug = nvkm_dbgopt(device->dbgopt, "device"); INIT_LIST_HEAD(&device->subdev); - mmio_base = device->func->resource_addr(device, 0); - mmio_size = device->func->resource_size(device, 0); + mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI); + mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI); device->pri = ioremap(mmio_base, mmio_size); if (device->pri == NULL) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 3ff6436007fa..8f0261a0d618 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device) return container_of(device, struct nvkm_device_pci, device); } +static int +nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar) +{ + int idx = 0; + + if (bar == NVKM_BAR0_PRI) + return idx; + + idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1; + if (bar == NVKM_BAR1_FB) + return idx; + + idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 
2 : 1; + if (bar == NVKM_BAR2_INST) + return idx; + + WARN_ON(1); + return -1; +} + static resource_size_t -nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar) +nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar) { struct nvkm_device_pci *pdev = nvkm_device_pci(device); - return pci_resource_start(pdev->pdev, bar); + int idx = nvkm_device_pci_resource_idx(pdev, bar); + + return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0; } static resource_size_t -nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar) +nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar) { struct nvkm_device_pci *pdev = nvkm_device_pci(device); - return pci_resource_len(pdev->pdev, bar); + int idx = nvkm_device_pci_resource_idx(pdev, bar); + + return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0; } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 78a83f904bbd..0ca2dfe99676 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device) } static struct resource * -nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar) +nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar) { struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); - return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar); + int idx; + + switch (bar) { + case NVKM_BAR0_PRI: idx = 0; break; + case NVKM_BAR1_FB : idx = 1; break; + default: + WARN_ON(1); + return ERR_PTR(-EINVAL); + } + + return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx); } static resource_size_t -nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar) +nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar) { struct resource *res = nvkm_device_tegra_resource(device, bar); return res ? res->start : 0; } static resource_size_t -nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar) +nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar) { struct resource *res = nvkm_device_tegra_resource(device, bar); return res ? 
resource_size(res) : 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index d7f75b3a43c8..1f331ec8d747 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -209,8 +209,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_udevice *udev = nvkm_udevice(object); struct nvkm_device *device = udev->device; *type = NVKM_OBJECT_MAP_IO; - *addr = device->func->resource_addr(device, 0); - *size = device->func->resource_size(device, 0); + *addr = device->func->resource_addr(device, NVKM_BAR0_PRI); + *size = device->func->resource_size(device, NVKM_BAR0_PRI); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c index 4e43ee383c34..9b84e357d354 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c @@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc, { struct nvkm_disp_chan *chan = nvkm_disp_chan(object); struct nvkm_device *device = chan->disp->engine.subdev.device; - const u64 base = device->func->resource_addr(device, 0); + const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI); *type = NVKM_OBJECT_MAP_IO; *addr = base + chan->func->user(chan, size); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c index cfa3698d3a2f..614921166fba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c @@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc, struct gv100_disp_caps *caps = gv100_disp_caps(object); struct nvkm_device *device = caps->disp->engine.subdev.device; *type = NVKM_OBJECT_MAP_IO; - *addr = 0x640000 + device->func->resource_addr(device, 0); + *addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI); *size = 0x1000; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index 3c2ca711dc5c..fdffa0391b31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c @@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine) } /* Allocate USERD + BAR1 polling area. */ - if (fifo->func->chan.func->userd->bar == 1) { + if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) { struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device); ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr * diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index 78be7abc90d1..4e09985424b6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c @@ -355,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru /* Validate arguments against class requirements. 
*/ if ((runq && runq >= runl->func->runqs) || (!func->inst->vmm != !vmm) || - ((func->userd->bar < 0) == !userd) || + (!func->userd->bar == !userd) || (!func->ramfc->ctxdma != !dmaobj) || ((func->ramfc->devm < devm) && devm != BIT(0)) || (!func->ramfc->priv && priv)) { RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p " "push:%d:%p devm:%08x:%08x priv:%d:%d", runl->func->runqs, runq, func->inst->vmm, vmm, - func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj, + func->userd->bar, userd, func->ramfc->ctxdma, dmaobj, func->ramfc->devm, devm, func->ramfc->priv, priv); return -EINVAL; } @@ -439,7 +439,7 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru /* Allocate channel ID. */ chan->id = nvkm_chid_get(runl->chid, chan); if (chan->id >= 0) { - if (func->userd->bar < 0) { + if (!func->userd->bar) { if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) { RUNL_DEBUG(runl, "ouserd %llx", ouserd); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h index 85b94f699128..445db5dfd1e4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h @@ -24,7 +24,7 @@ struct nvkm_chan_func { } *inst; const struct nvkm_chan_func_userd { - int bar; + enum nvkm_bar_id bar; u32 base; u32 size; void (*clear)(struct nvkm_chan *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c index 6c94451d0faa..e4a4fad2eafc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c @@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan) static const struct nvkm_chan_func_userd gf100_chan_userd = { - .bar = 1, + .bar = NVKM_BAR1_FB, .size = 0x1000, .clear = gf100_chan_userd_clear, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index d8a4d773a58c..5655eda52a7b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -113,7 +113,7 @@ gk104_chan_ramfc = { const struct nvkm_chan_func_userd gk104_chan_userd = { - .bar = 1, + .bar = NVKM_BAR1_FB, .size = 0x200, .clear = gf100_chan_userd_clear, }; @@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo) { struct nvkm_device *device = fifo->engine.subdev.device; - if (fifo->func->chan.func->userd->bar == 1) + if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12); nvkm_wr32(device, 0x002100, 0xffffffff); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c index 33066c8cdc64..d7f046c03cfd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c @@ -70,7 +70,6 @@ gv100_chan_ramfc = { const struct nvkm_chan_func_userd gv100_chan_userd = { - .bar = -1, .size = 0x200, .clear = gf100_chan_userd_clear, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index 674faf002b20..c4b8e567d86f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c @@ -154,7 +154,7 @@ nv04_chan_ramfc = { const struct nvkm_chan_func_userd nv04_chan_userd = { - .bar = 0, + .bar = NVKM_BAR0_PRI, .base = 0x800000, .size = 0x010000, }; diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c index e50a94b6d7f8..084ca5561ee1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c @@ -93,7 +93,7 @@ nv40_chan_ramfc = { static const struct nvkm_chan_func_userd nv40_chan_userd = { - .bar = 0, + .bar = NVKM_BAR0_PRI, .base = 0xc00000, .size = 0x001000, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index 954b5f3a7d57..7bf77661157d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c @@ -124,7 +124,7 @@ nv50_chan_ramfc = { const struct nvkm_chan_func_userd nv50_chan_userd = { - .bar = 0, + .bar = NVKM_BAR0_PRI, .base = 0xc00000, .size = 0x002000, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c index 9e56bcc166ed..52420a1edca5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c @@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_chan *chan = nvkm_uchan(object)->chan; struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device; - if (chan->func->userd->bar < 0) + if (!chan->func->userd->bar) return -ENOSYS; *type = NVKM_OBJECT_MAP_IO; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c index 02a8c62a0a32..13407fafe947 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c @@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base) nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp); /* begin RAM config */ - vramsz = device->func->resource_size(device, 1) - 1; + vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1; nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200)); nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204)); nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index a5e1f02791b4..b609b0150ba1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c @@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base) } /* begin RAM config */ - vramsz = device->func->resource_size(device, 1) - 1; + vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1; switch (device->chipset) { case 0x40: nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200)); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index 51070b7dda85..e5e60915029c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base) static int gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm, - struct lock_class_key *key, int bar_nr) + struct lock_class_key *key, enum nvkm_bar_id bar_id) { struct nvkm_device *device = bar->base.subdev.device; resource_size_t bar_len; @@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm, if (ret) return ret; - bar_len = device->func->resource_size(device, bar_nr); + bar_len = device->func->resource_size(device, bar_id); if (!bar_len) return -ENOMEM; - if (bar_nr == 3 && bar->bar2_halve) + if (bar_id == NVKM_BAR2_INST 
&& bar->bar2_halve) bar_len >>= 1; ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key, - (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm); + (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm); if (ret) return ret; @@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm, /* * Bootstrap page table lookup. */ - if (bar_nr == 3) { + if (bar_id == NVKM_BAR2_INST) { ret = nvkm_vmm_boot(bar_vm->vmm); if (ret) return ret; @@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base) /* BAR2 */ if (bar->base.func->bar2.init) { - ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3); + ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST); if (ret) return ret; @@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base) } /* BAR1 */ - ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1); + ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c index 27d8a1be43e4..6a881becb02c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c @@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR2 */ start = 0x0100000000ULL; - size = device->func->resource_size(device, 3); + size = device->func->resource_size(device, NVKM_BAR2_INST); if (!size) return -ENOMEM; limit = start + size; @@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base) /* BAR1 */ start = 0x0000000000ULL; - size = device->func->resource_size(device, 1); + size = device->func->resource_size(device, NVKM_BAR1_FB); if (!size) return -ENOMEM; limit = start + size; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h index 6c5bbff12eb4..b918e22df5a8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h @@ -47,8 +47,8 @@ static inline struct io_mapping * fbmem_init(struct nvkm_device *dev) { - return io_mapping_create_wc(dev->func->resource_addr(dev, 1), - dev->func->resource_size(dev, 1)); + return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB), + dev->func->resource_size(dev, NVKM_BAR1_FB)); } static inline void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c index c123e5893d76..cd2fbc0472d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c @@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object); struct nvkm_device *device = buffer->fault->subdev.device; *type = NVKM_OBJECT_MAP_IO; - *addr = device->func->resource_addr(device, 3) + buffer->addr; + *addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr; *size = nvkm_memory_size(buffer->mem); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c index 91242f09648e..d06bf95b9a4a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c @@ -191,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device, } *pbar = bar; - bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE); + bar->flushBAR2PhysMode = 
ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE); if (!bar->flushBAR2PhysMode) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index e2171d0d25be..fe00425c5479 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -901,9 +901,9 @@ r535_gsp_set_system_info(struct nvkm_gsp *gsp) if (IS_ERR(info)) return PTR_ERR(info); - info->gpuPhysAddr = device->func->resource_addr(device, 0); - info->gpuPhysFbAddr = device->func->resource_addr(device, 1); - info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI); + info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB); + info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST); info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); info->maxUserVa = TASK_SIZE; info->pciConfigMirrorBase = device->pci->func->cfg.addr; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c index a3c070d41923..730dcb645cca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c @@ -153,9 +153,9 @@ r570_gsp_set_system_info(struct nvkm_gsp *gsp) if (IS_ERR(info)) return PTR_ERR(info); - info->gpuPhysAddr = device->func->resource_addr(device, 0); - info->gpuPhysFbAddr = device->func->resource_addr(device, 1); - info->gpuPhysInstAddr = device->func->resource_addr(device, 3); + info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI); + info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB); + info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST); info->nvDomainBusDeviceFunc = pci_dev_id(pdev); info->maxUserVa = TASK_SIZE; info->pciConfigMirrorBase = device->pci->func->cfg.addr; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c index 6b462f960922..2544b9f0ec85 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c @@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins struct nvkm_instmem **pimem) { struct nv40_instmem *imem; - int bar; if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; @@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins *pimem = &imem->base; /* map bar */ - if (device->func->resource_size(device, 2)) - bar = 2; - else - bar = 3; - - imem->iomem = ioremap_wc(device->func->resource_addr(device, bar), - device->func->resource_size(device, bar)); + imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST), + device->func->resource_size(device, NVKM_BAR2_INST)); if (!imem->iomem) { nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n"); return -EFAULT; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index 0ef66d7d5e51..9d29e5234734 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm) /* Make the mapping visible to the host. 
*/ iobj->bar = bar; - iobj->map = ioremap_wc(device->func->resource_addr(device, 3) + + iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) + (u32)iobj->bar->addr, size); if (!iobj->map) { nvkm_warn(subdev, "PRAMIN ioremap failed\n"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c index d9c9bee45222..160a5749a29f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c @@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, if (ret) return ret; - *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; + *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr; *psize = (*pvma)->size; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c index 79a3b0cc9f5b..1e3db52de6cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c @@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) return ret; - *paddr = device->func->resource_addr(device, 1) + addr; + *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr; *psize = nvkm_memory_size(memory); *pvma = ERR_PTR(-ENODEV); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c index 46759b89fc1f..33b2321e9d87 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c @@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, if (ret) return ret; - *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; + *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr; *psize = (*pvma)->size; return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm)); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c index c5460a14c541..4e64d8843373 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c @@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn; struct nvkm_device *device = vfn->subdev.device; - *addr = device->func->resource_addr(device, 0) + vfn->addr.user; + *addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user; *size = vfn->func->user.size; *type = NVKM_OBJECT_MAP_IO; return 0; -- cgit v1.2.3 From 44f93b209e2afdef8524eff3a50716334cd406c6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Nov 2024 10:21:18 +1000 Subject: drm/nouveau: add support for GH100 This commit enables basic support for Hopper GPUs, and is intended primarily as a base supporting Blackwell GPUs, which reuse most of the code added here. Advanced features such as Confidential Compute are not supported. Beyond a few miscellaneous register moves and HW class ID plumbing, the bulk of the changes implemented here are to support the GSP-RM boot sequence used on Hopper/Blackwell GPUs, as well as a new page table layout. There should be no changes here that impact prior GPUs. 
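
[editor's note: as a quick illustration of the new page table layout mentioned above, the NV_MMU_VER3_* definitions in the dev_mmu.h header added later in this patch give the Hopper PTE bit positions (VALID 0:0, APERTURE 2:1, PCF 7:3, KIND 11:8, ADDRESS 51:12). The standalone sketch below simply packs those fields with plain shifts; the helper name and the stdint.h types are this note's own invention for illustration, not nouveau code - the driver's real page-table handling for this layout lives in the vmmgh100.c file added by this patch.]

/*
 * Illustrative only: pack a VER3 PTE using the bit positions defined in
 * the gh100 dev_mmu.h header added below.  VALID is bit 0, APERTURE is
 * bits 2:1, PCF is bits 7:3, KIND is bits 11:8, and ADDRESS is bits
 * 51:12 (4KiB-aligned physical address, i.e. phys >> 12).
 */
#include <stdint.h>

static inline uint64_t
ver3_pte_pack(uint64_t phys, uint64_t aperture, uint64_t pcf, uint64_t kind)
{
	uint64_t pte = 0;

	pte |= 1ULL << 0;				/* VALID = TRUE */
	pte |= (aperture & 0x3) << 1;			/* APERTURE, 2 bits */
	pte |= (pcf & 0x1f) << 3;			/* PCF, 5 bits */
	pte |= (kind & 0xf) << 8;			/* KIND, 4 bits */
	pte |= ((phys >> 12) & 0xffffffffffULL) << 12;	/* ADDRESS, bits 51:12 */

	return pte;
}

[editor's note: aperture and pcf would be one of the NV_MMU_VER3_PTE_APERTURE_* and NV_MMU_VER3_PTE_PCF_* values from that header.]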
Signed-off-by: Ben Skeggs Co-developed-by: Timur Tabi Signed-off-by: Timur Tabi Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h | 20 ++ .../drm/nouveau/include/nvhw/ref/gh100/dev_fb.h | 15 + .../nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h | 28 ++ .../drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h | 173 ++++++++++ .../nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h | 14 + .../drm/nouveau/include/nvhw/ref/gh100/dev_therm.h | 17 + .../include/nvhw/ref/gh100/dev_xtl_ep_pri.h | 10 + .../nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h | 13 + drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 1 + drivers/gpu/drm/nouveau/include/nvif/class.h | 9 + drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 4 + drivers/gpu/drm/nouveau/include/nvkm/core/layout.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h | 22 ++ drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 13 + .../gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h | 1 + drivers/gpu/drm/nouveau/nouveau_bo.c | 1 + drivers/gpu/drm/nouveau/nouveau_chan.c | 1 + drivers/gpu/drm/nouveau/nouveau_drm.c | 1 + drivers/gpu/drm/nouveau/nvif/user.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 18 ++ drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 1 + drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c | 30 ++ drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild | 6 + drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c | 66 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c | 275 ++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h | 28 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c | 353 +++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 8 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c | 27 ++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 1 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 6 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c | 11 +- .../drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h | 58 ++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c | 15 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c | 4 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild | 1 + .../gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c | 28 ++ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 6 +- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h | 5 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c | 25 ++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h | 7 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c | 306 ++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 3 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild | 1 + 
drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c | 30 ++ 61 files changed, 1680 insertions(+), 9 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h create mode 100644 drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h new file mode 100644 index 000000000000..52171b412aa1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gh100_dev_falcon_v4_h__ +#define __gh100_dev_falcon_v4_h__ + +#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */ + +#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN 13:13 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_LOCK 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_UNLOCK 0x00000000 /* R---V */ + +#endif // __gh100_dev_falcon_v4_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h new file mode 100644 index 000000000000..819f09465952 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __gh100_dev_fb_h_ +#define __gh100_dev_fb_h_ + +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */ +#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00100A34 /* RW-4R */ +#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */ +#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00100A38 /* RW-4R */ +#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */ +#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */ + +#endif // __gh100_dev_fb_h_ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h new file mode 100644 index 000000000000..e9507242cae5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gh100_dev_fsp_pri_h__ +#define __gh100_dev_fsp_pri_h__ + +#define NV_PFSP 0x8F3FFF:0x8F0000 /* RW--D */ + +#define NV_PFSP_MSGQ_HEAD(i) (0x008F2c80+(i)*8) /* RW-4A */ +#define NV_PFSP_MSGQ_HEAD__SIZE_1 8 /* */ +#define NV_PFSP_MSGQ_HEAD_VAL 31:0 /* RWIUF */ +#define NV_PFSP_MSGQ_HEAD_VAL_INIT 0x00000000 /* RWI-V */ +#define NV_PFSP_MSGQ_TAIL(i) (0x008F2c84+(i)*8) /* RW-4A */ +#define NV_PFSP_MSGQ_TAIL__SIZE_1 8 /* */ +#define NV_PFSP_MSGQ_TAIL_VAL 31:0 /* RWIUF */ +#define NV_PFSP_MSGQ_TAIL_VAL_INIT 0x00000000 /* RWI-V */ + +#define NV_PFSP_QUEUE_HEAD(i) (0x008F2c00+(i)*8) /* RW-4A */ +#define NV_PFSP_QUEUE_HEAD__SIZE_1 8 /* */ +#define NV_PFSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */ +#define NV_PFSP_QUEUE_HEAD_ADDRESS_INIT 0x00000000 /* RWI-V */ +#define NV_PFSP_QUEUE_TAIL(i) (0x008F2c04+(i)*8) /* RW-4A */ +#define NV_PFSP_QUEUE_TAIL__SIZE_1 8 /* */ +#define NV_PFSP_QUEUE_TAIL_ADDRESS 31:0 /* RWIVF */ +#define NV_PFSP_QUEUE_TAIL_ADDRESS_INIT 0x00000000 /* RWI-V */ + +#endif // __gh100_dev_fsp_pri_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h new file mode 100644 index 000000000000..6707e0e3b96b --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __gh100_dev_mmu_h__ +#define __gh100_dev_mmu_h__ + +#define NV_MMU_PTE /* ----G */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_KIND (1*32+7):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x6 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x1 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x2 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0x4 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x5 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x8 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x9 /* R---V */ +#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0xA /* R---V */ +#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0xB /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0xC /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0xD /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0xE /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xF /* R---V */ + +#define NV_MMU_VER3_PDE /* ----G */ +#define NV_MMU_VER3_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER3_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER3_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER3_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER3_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER3_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER3_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER3_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF 5:3 /* RWXVF */ +#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PDE_ADDRESS 51:12 /* RWXVF */ +#define NV_MMU_VER3_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER3_PDE__SIZE 8 + +#define NV_MMU_VER3_DUAL_PDE /* ----G */ +#define NV_MMU_VER3_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define 
NV_MMU_VER3_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG 5:3 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG 51:8 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL 69:67 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */ +#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SMALL 
115:76 /* RWXVF */ +#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_VER3_DUAL_PDE__SIZE 16 + +#define NV_MMU_VER3_PTE /* ----G */ +#define NV_MMU_VER3_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER3_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER3_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER3_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF 7:3 /* RWXVF */ +#define NV_MMU_VER3_PTE_PCF_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_SPARSE 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_MAPPING_NOWHERE 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_NO_VALID_4KB_PAGE 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000004 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x00000005 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACE 0x00000006 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x00000007 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000008 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000009 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x0000000A /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x0000000B /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x0000000C /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000D /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x0000000E /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000F /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000010 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000011 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000012 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000013 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000014 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x00000015 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD 0x00000016 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD 0x00000017 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000018 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000019 /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACD 0x0000001A /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACD 0x0000001B /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x0000001C /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001D /* RW--V */ 
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACD 0x0000001E /* RW--V */ +#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001F /* RW--V */ +#define NV_MMU_VER3_PTE_KIND 11:8 /* RWXVF */ +#define NV_MMU_VER3_PTE_ADDRESS 51:12 /* RWXVF */ +#define NV_MMU_VER3_PTE_ADDRESS_SYS 51:12 /* RWXVF */ +#define NV_MMU_VER3_PTE_ADDRESS_PEER 51:12 /* RWXVF */ +#define NV_MMU_VER3_PTE_ADDRESS_VID 39:12 /* RWXVF */ +#define NV_MMU_VER3_PTE_PEER_ID 63:(64-3) /* RWXVF */ +#define NV_MMU_VER3_PTE_PEER_ID_0 0x00000000 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_1 0x00000001 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_2 0x00000002 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_3 0x00000003 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_4 0x00000004 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_5 0x00000005 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_6 0x00000006 /* RW--V */ +#define NV_MMU_VER3_PTE_PEER_ID_7 0x00000007 /* RW--V */ +#define NV_MMU_VER3_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER3_PTE__SIZE 8 + +#endif // __gh100_dev_mmu_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h new file mode 100644 index 000000000000..8ff4663168d2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gh100_dev_riscv_pri_h__ +#define __gh100_dev_riscv_pri_h__ + +#define NV_PRISCV_RISCV_CPUCTL 0x00000388 /* RW-4R */ +#define NV_PRISCV_RISCV_CPUCTL_HALTED 4:4 /* R-IVF */ +#define NV_PRISCV_RISCV_CPUCTL_HALTED_INIT 0x00000001 /* R-I-V */ +#define NV_PRISCV_RISCV_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_PRISCV_RISCV_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */ + +#endif // __gh100_dev_riscv_pri_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h new file mode 100644 index 000000000000..49b4816cb00b --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gh100_dev_therm_h__ +#define __gh100_dev_therm_h__ + +#define NV_THERM_I2CS_SCRATCH 0x000200bc /* RW-4R */ +#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */ +#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */ + +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0 +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000 + +#endif // __gh100_dev_therm_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h new file mode 100644 index 000000000000..12b49e9894a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __gh100_dev_xtl_ep_pri_h__ +#define __gh100_dev_xtl_ep_pri_h__ + +#define NV_EP_PCFGM 0x92FFF:0x92000 /* RW--D */ + +#endif // __gh100_dev_xtl_ep_pri_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h new file mode 100644 index 000000000000..1a891bd33fa3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gh100_pri_nv_xal_ep_h__ +#define __gh100_pri_nv_xal_ep_h__ + +#define NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT 0x000010 +#define NV_XAL_EP_BAR0_WINDOW_BASE 21:0 +#define NV_XAL_EP_BAR0_WINDOW 0x0010fd40 + +#endif // __gh100_pri_nv_xal_ep_h__ + diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index ea937fa7bc55..60a52ef52071 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -29,6 +29,7 @@ struct nv_device_info_v0 { #define NV_DEVICE_INFO_V0_TURING 0x0c #define NV_DEVICE_INFO_V0_AMPERE 0x0d #define NV_DEVICE_INFO_V0_ADA 0x0e +#define NV_DEVICE_INFO_V0_HOPPER 0x0f __u8 family; __u8 pad06[2]; __u64 ram_size; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 71a2a53bff7f..83acf367a65c 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -63,6 +63,7 @@ #define VOLTA_USERMODE_A 0x0000c361 #define TURING_USERMODE_A 0x0000c461 #define AMPERE_USERMODE_A 0x0000c561 +#define HOPPER_USERMODE_A 0x0000c661 #define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069 #define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369 @@ -85,6 +86,7 @@ #define TURING_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c46f #define AMPERE_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c56f #define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f +#define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f #define NV50_DISP /* if0010.h */ 0x00005070 #define G82_DISP /* if0010.h */ 0x00008270 @@ -194,8 +196,11 @@ #define ADA_A /* cl9097.h */ 0x0000c997 +#define HOPPER_A 0x0000cb97 + #define NV74_BSP 0x000074b0 +#define NVB8B0_VIDEO_DECODER 0x0000b8b0 #define NVC4B0_VIDEO_DECODER 0x0000c4b0 #define NVC6B0_VIDEO_DECODER 0x0000c6b0 #define NVC7B0_VIDEO_DECODER 0x0000c7b0 @@ -228,6 +233,7 @@ #define TURING_DMA_COPY_A 0x0000c5b5 #define AMPERE_DMA_COPY_A 0x0000c6b5 #define AMPERE_DMA_COPY_B 0x0000c7b5 +#define HOPPER_DMA_COPY_A 0x0000c8b5 #define NVC4B7_VIDEO_ENCODER 0x0000c4b7 #define NVC7B7_VIDEO_ENCODER 0x0000c7b7 @@ -250,12 +256,15 @@ #define AMPERE_COMPUTE_A 0x0000c6c0 #define AMPERE_COMPUTE_B 0x0000c7c0 #define ADA_COMPUTE_A 0x0000c9c0 +#define HOPPER_COMPUTE_A 0x0000cbc0 #define NV74_CIPHER 0x000074c1 +#define NVB8D1_VIDEO_NVJPG 0x0000b8d1 #define NVC4D1_VIDEO_NVJPG 0x0000c4d1 #define NVC9D1_VIDEO_NVJPG 0x0000c9d1 +#define NVB8FA_VIDEO_OFA 0x0000b8fa #define NVC6FA_VIDEO_OFA 0x0000c6fa #define NVC7FA_VIDEO_OFA 0x0000c7fa #define NVC9FA_VIDEO_OFA 0x0000c9fa diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index f50f52d4dc3f..926542350abc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -46,6 +46,7 @@ struct nvkm_device { GV100 = 0x140, TU100 = 0x160, GA100 = 0x170, + GH100 = 0x180, AD100 = 0x190, } card_type; u32 chipset; 
@@ -131,6 +132,9 @@ struct nvkm_device *nvkm_device_find(u64 name); _temp; \ }) +#define NVKM_RD32_(p,o,dr) nvkm_rd32((p), (o) + (dr)) +#define NVKM_RD32(p,A...) DRF_RV(NVKM_RD32_, (p), 0, ##A) + void nvkm_device_del(struct nvkm_device **); struct nvkm_device_oclass { diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h index 2debef27bd95..d92ffd17b729 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: MIT */ +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FSP , struct nvkm_fsp , fsp) NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp) NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top) NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN , struct nvkm_vfn , vfn) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 5b798a1a313d..c114903ce388 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -102,6 +102,7 @@ int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gh100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); #include #include diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h new file mode 100644 index 000000000000..2a8c1d5a65f9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __NVKM_FSP_H__ +#define __NVKM_FSP_H__ +#include +#include + +struct nvkm_fsp { + const struct nvkm_fsp_func *func; + struct nvkm_subdev subdev; + + struct nvkm_falcon falcon; +}; + +bool nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *, u32 hash_size, u32 pkey_size, u32 sig_size); +int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); + +int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 4ad07f3ced69..8f611b2503b7 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -68,6 +68,9 @@ struct nvkm_gsp { const struct firmware *load; const struct firmware *unload; } booter; + + const struct firmware *fmc; + const struct firmware *bl; const struct firmware *rm; } fws; @@ -113,6 +116,15 @@ struct nvkm_gsp { struct nvkm_falcon_fw unload; } booter; + struct { + struct nvkm_gsp_mem fw; + u8 *hash; + u8 *pkey; + u8 *sig; + + struct nvkm_gsp_mem args; + } fmc; + struct { struct nvkm_gsp_mem fw; u32 code_offset; @@ -478,5 +490,6 @@ int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_ int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); +int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index 7d93c742ee59..db835cf7b8ac 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h @@ -36,4 +36,5 @@ int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nv int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); +int gh100_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 4cab139f3236..abcb0dbcde70 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -166,4 +166,5 @@ int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gh100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h index 3c103101d5fc..112b674ed9c8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h +++ 
b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h @@ -50,6 +50,7 @@ int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int gh100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); /* pcie functions */ int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 9ab8380feb39..fbe0144927e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -923,6 +923,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { + { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init }, { "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init }, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 5bcd29809c1e..a14aa6715bb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -255,6 +255,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, struct nouveau_channel **pchan) { const struct nvif_mclass hosts[] = { + { HOPPER_CHANNEL_GPFIFO_A, 0 }, { AMPERE_CHANNEL_GPFIFO_B, 0 }, { AMPERE_CHANNEL_GPFIFO_A, 0 }, { TURING_CHANNEL_GPFIFO_A, 0 }, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index e7544942791d..5b6bb4c2f78b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -509,6 +509,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case TURING_CHANNEL_GPFIFO_A: case AMPERE_CHANNEL_GPFIFO_A: case AMPERE_CHANNEL_GPFIFO_B: + case HOPPER_CHANNEL_GPFIFO_A: ret = gv100_fence_create(drm); break; default: diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c index b648a5e036af..ae470a1fdfb8 100644 --- a/drivers/gpu/drm/nouveau/nvif/user.c +++ b/drivers/gpu/drm/nouveau/nvif/user.c @@ -41,6 +41,7 @@ nvif_user_ctor(struct nvif_device *device, const char *name) int version; const struct nvif_user_func *func; } users[] = { + { HOPPER_USERMODE_A, -1, &nvif_userc361 }, { AMPERE_USERMODE_A, -1, &nvif_userc361 }, { TURING_USERMODE_A, -1, &nvif_userc361 }, { VOLTA_USERMODE_A, -1, &nvif_userc361 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index ebcaf2ecff48..5082fe5f1966 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2681,6 +2681,22 @@ nv177_chipset = { .sec2 = { 0x00000001, ga102_sec2_new }, }; +static const struct nvkm_device_chip +nv180_chipset = { + .name = "GH100", + .bar = { 0x00000001, tu102_bar_new }, + .fault = { 0x00000001, tu102_fault_new }, + .fb = { 0x00000001, gh100_fb_new }, + .fsp = { 0x00000001, gh100_fsp_new }, + .gsp = { 0x00000001, gh100_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + 
.vfn = { 0x00000001, ga100_vfn_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + static const struct nvkm_device_chip nv192_chipset = { .name = "AD102", @@ -3101,6 +3117,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x140: device->card_type = GV100; break; case 0x160: device->card_type = TU100; break; case 0x170: device->card_type = GA100; break; + case 0x180: device->card_type = GH100; break; case 0x190: device->card_type = AD100; break; default: break; @@ -3204,6 +3221,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x174: device->chip = &nv174_chipset; break; case 0x176: device->chip = &nv176_chipset; break; case 0x177: device->chip = &nv177_chipset; break; + case 0x180: device->chip = &nv180_chipset; break; case 0x192: device->chip = &nv192_chipset; break; case 0x193: device->chip = &nv193_chipset; break; case 0x194: device->chip = &nv194_chipset; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h index 8da5e896dd74..75ee7506d443 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 1f331ec8d747..57c2678022b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -148,6 +148,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break; case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break; case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break; + case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break; default: args->v0.family = 0; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild index 4c2f6fc4ef58..c19ea4ea9bd3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild @@ -9,6 +9,7 @@ include $(src)/nvkm/subdev/fault/Kbuild include $(src)/nvkm/subdev/fb/Kbuild include $(src)/nvkm/subdev/fuse/Kbuild include $(src)/nvkm/subdev/gpio/Kbuild +include $(src)/nvkm/subdev/fsp/Kbuild include $(src)/nvkm/subdev/gsp/Kbuild include $(src)/nvkm/subdev/i2c/Kbuild include $(src)/nvkm/subdev/iccsense/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index d1611ad3bf81..f13312934131 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -35,6 +35,7 @@ nvkm-y += nvkm/subdev/fb/gv100.o nvkm-y += nvkm/subdev/fb/tu102.o nvkm-y += nvkm/subdev/fb/ga100.o nvkm-y += nvkm/subdev/fb/ga102.o +nvkm-y += nvkm/subdev/fb/gh100.o nvkm-y += nvkm/subdev/fb/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c index 25f82b372bca..2819780050d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c @@ -25,7 +25,7 @@ #include #include -static u64 +u64 ga102_fb_vidmem_size(struct nvkm_fb *fb) { return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c new file mode 100644 index 000000000000..2d8c51f882d5 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include +#include + +static void +gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) +{ + const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT; + struct nvkm_device *device = fb->subdev.device; + + nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr)); + nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr)); +} + +static const struct nvkm_fb_func +gh100_fb = { + .sysmem.flush_page_init = gh100_fb_sysmem_flush_page_init, + .vidmem.size = ga102_fb_vidmem_size, +}; + +int +gh100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + return r535_fb_new(&gh100_fb, device, type, inst, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 35c55dfba23d..ebe996503ab2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -98,4 +98,6 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *); int gv100_fb_init_page(struct nvkm_fb *); bool tu102_fb_vpr_scrub_required(struct nvkm_fb *); + +u64 ga102_fb_vidmem_size(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild new file mode 100644 index 000000000000..ff04992b181d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + +nvkm-y += nvkm/subdev/fsp/base.o +nvkm-y += nvkm/subdev/fsp/gh100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c new file mode 100644 index 000000000000..e366a980baa9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "priv.h" + +int +nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig) +{ + return fsp->func->cot.boot_gsp_fmc(fsp, args_addr, rsvd_size, resume, + img_addr, hash, pkey, sig); +} + +bool +nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *fsp, u32 hash_size, u32 pkey_size, u32 sig_size) +{ + return hash_size == fsp->func->cot.size_hash && + pkey_size == fsp->func->cot.size_pkey && + sig_size == fsp->func->cot.size_sig; +} + +static int +nvkm_fsp_preinit(struct nvkm_subdev *subdev) +{ + struct nvkm_fsp *fsp = nvkm_fsp(subdev); + + return fsp->func->wait_secure_boot(fsp); +} + +static void * +nvkm_fsp_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_fsp *fsp = nvkm_fsp(subdev); + + nvkm_falcon_dtor(&fsp->falcon); + return fsp; +} + +static const struct nvkm_falcon_func +nvkm_fsp_flcn = { + .emem_pio = &gp102_flcn_emem_pio, +}; + +static const struct nvkm_subdev_func +nvkm_fsp = { + .dtor = nvkm_fsp_dtor, + .preinit = nvkm_fsp_preinit, +}; + +int +nvkm_fsp_new_(const struct nvkm_fsp_func *func, + struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fsp **pfsp) +{ + struct nvkm_fsp *fsp; + + fsp = *pfsp = kzalloc(sizeof(*fsp), GFP_KERNEL); + if (!fsp) + return -ENOMEM; + + fsp->func = func; + nvkm_subdev_ctor(&nvkm_fsp, device, type, inst, &fsp->subdev); + + return nvkm_falcon_ctor(&nvkm_fsp_flcn, &fsp->subdev, "fsp", 0x8f2000, &fsp->falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c new file mode 100644 index 000000000000..9f4285af3fed --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include +#include +#include + +#include + +#define MCTP_HEADER_VERSION 3:0 +#define MCTP_HEADER_RSVD 7:4 + +#define MCTP_HEADER_DEID 15:8 +#define MCTP_HEADER_SEID 23:16 + +#define MCTP_HEADER_TAG 26:24 +#define MCTP_HEADER_TO 27:27 +#define MCTP_HEADER_SEQ 29:28 +#define MCTP_HEADER_EOM 30:30 +#define MCTP_HEADER_SOM 31:31 + +#define MCTP_MSG_HEADER_TYPE 6:0 +#define MCTP_MSG_HEADER_IC 7:7 + +#define MCTP_MSG_HEADER_VENDOR_ID 23:8 +#define MCTP_MSG_HEADER_NVDM_TYPE 31:24 + +#define MCTP_MSG_HEADER_TYPE_VENDOR_PCI 0x7e +#define MCTP_MSG_HEADER_VENDOR_ID_NV 0x10de + +#define NVDM_TYPE_COT 0x14 +#define NVDM_TYPE_FSP_RESPONSE 0x15 + +#pragma pack(1) +typedef struct nvdm_payload_cot +{ + NvU16 version; + NvU16 size; + NvU64 gspFmcSysmemOffset; + NvU64 frtsSysmemOffset; + NvU32 frtsSysmemSize; + + // Note this is an offset from the end of FB + NvU64 frtsVidmemOffset; + NvU32 frtsVidmemSize; + + // Authentication related fields + NvU32 hash384[12]; + NvU32 publicKey[96]; + NvU32 signature[96]; + + NvU64 gspBootArgsSysmemOffset; +} NVDM_PAYLOAD_COT; +#pragma pack() + +#pragma pack(1) +typedef struct +{ + NvU32 taskId; + NvU32 commandNvdmType; + NvU32 errorCode; +} NVDM_PAYLOAD_COMMAND_RESPONSE; +#pragma pack() + +static u32 +gh100_fsp_poll(struct nvkm_fsp *fsp) +{ + struct nvkm_device *device = fsp->subdev.device; + u32 head, tail; + + head = nvkm_rd32(device, NV_PFSP_MSGQ_HEAD(0)); + tail = nvkm_rd32(device, NV_PFSP_MSGQ_TAIL(0)); + + if (head == tail) + return 0; + + return (tail - head) + sizeof(u32); /* TAIL points at last DWORD written. 
*/ +} + +static int +gh100_fsp_recv(struct nvkm_fsp *fsp, u8 *packet, u32 max_packet_size) +{ + struct nvkm_device *device = fsp->subdev.device; + u32 packet_size; + int ret; + + packet_size = gh100_fsp_poll(fsp); + if (!packet_size || WARN_ON(packet_size % 4 || packet_size > max_packet_size)) + return -EINVAL; + + ret = nvkm_falcon_pio_rd(&fsp->falcon, 0, EMEM, 0, packet, 0, packet_size); + if (ret) + return ret; + + nvkm_wr32(device, NV_PFSP_MSGQ_TAIL(0), 0); + nvkm_wr32(device, NV_PFSP_MSGQ_HEAD(0), 0); + + return packet_size; +} + +static int +gh100_fsp_wait(struct nvkm_fsp *fsp) +{ + int time = 1000; + + do { + if (gh100_fsp_poll(fsp)) + return 0; + + usleep_range(1000, 2000); + } while(time--); + + return -ETIMEDOUT; +} + +static int +gh100_fsp_send(struct nvkm_fsp *fsp, const u8 *packet, u32 packet_size) +{ + struct nvkm_device *device = fsp->subdev.device; + int time = 1000, ret; + + if (WARN_ON(packet_size % sizeof(u32))) + return -EINVAL; + + /* Ensure any previously sent message has been consumed. */ + do { + u32 head = nvkm_rd32(device, NV_PFSP_QUEUE_HEAD(0)); + u32 tail = nvkm_rd32(device, NV_PFSP_QUEUE_TAIL(0)); + + if (tail == head) + break; + + usleep_range(1000, 2000); + } while(time--); + + if (time < 0) + return -ETIMEDOUT; + + /* Write message to EMEM. */ + ret = nvkm_falcon_pio_wr(&fsp->falcon, packet, 0, 0, EMEM, 0, packet_size, 0, false); + if (ret) + return ret; + + /* Update queue pointers - TAIL points at last DWORD written. */ + nvkm_wr32(device, NV_PFSP_QUEUE_TAIL(0), packet_size - sizeof(u32)); + nvkm_wr32(device, NV_PFSP_QUEUE_HEAD(0), 0); + return 0; +} + +static int +gh100_fsp_send_sync(struct nvkm_fsp *fsp, u8 nvdm_type, const u8 *packet, u32 packet_size) +{ + struct nvkm_subdev *subdev = &fsp->subdev; + struct { + u32 mctp_header; + u32 nvdm_header; + NVDM_PAYLOAD_COMMAND_RESPONSE response; + } reply; + int ret; + + ret = gh100_fsp_send(fsp, packet, packet_size); + if (ret) + return ret; + + ret = gh100_fsp_wait(fsp); + if (ret) + return ret; + + ret = gh100_fsp_recv(fsp, (u8 *)&reply, sizeof(reply)); + if (ret < 0) + return ret; + + if (NVVAL_TEST(reply.mctp_header, MCTP, HEADER, SOM, !=, 1) || + NVVAL_TEST(reply.mctp_header, MCTP, HEADER, EOM, !=, 1)) { + nvkm_error(subdev, "unexpected MCTP header in reply: 0x%08x\n", reply.mctp_header); + return -EIO; + } + + if (NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, TYPE, !=, VENDOR_PCI) || + NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, VENDOR_ID, !=, NV) || + NVVAL_TEST(reply.nvdm_header, MCTP, MSG_HEADER, NVDM_TYPE, !=, NVDM_TYPE_FSP_RESPONSE)) { + nvkm_error(subdev, "unexpected NVDM header in reply: 0x%08x\n", reply.nvdm_header); + return -EIO; + } + + if (reply.response.commandNvdmType != nvdm_type) { + nvkm_error(subdev, "expected NVDM type 0x%02x in reply, got 0x%02x\n", + nvdm_type, reply.response.commandNvdmType); + return -EIO; + } + + if (reply.response.errorCode) { + nvkm_error(subdev, "NVDM command 0x%02x failed with error 0x%08x\n", + nvdm_type, reply.response.errorCode); + return -EIO; + } + + return 0; +} + +int +gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig) +{ + struct { + u32 mctp_header; + u32 nvdm_header; + NVDM_PAYLOAD_COT cot; + } msg = {}; + + msg.mctp_header = NVVAL(MCTP, HEADER, SOM, 1) | + NVVAL(MCTP, HEADER, EOM, 1) | + NVVAL(MCTP, HEADER, SEID, 0) | + NVVAL(MCTP, HEADER, SEQ, 0); + + msg.nvdm_header = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) | + NVDEF(MCTP, MSG_HEADER, 
VENDOR_ID, NV) | + NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT); + + msg.cot.version = fsp->func->cot.version; + msg.cot.size = sizeof(msg.cot); + msg.cot.gspFmcSysmemOffset = img_addr; + if (!resume) { + msg.cot.frtsVidmemOffset = ALIGN(rsvd_size, 0x200000); + msg.cot.frtsVidmemSize = 0x100000; + } + + memcpy(msg.cot.hash384, hash, fsp->func->cot.size_hash); + memcpy(msg.cot.publicKey, pkey, fsp->func->cot.size_pkey); + memcpy(msg.cot.signature, sig, fsp->func->cot.size_sig); + + msg.cot.gspBootArgsSysmemOffset = args_addr; + + return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg)); +} + +static int +gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp) +{ + struct nvkm_device *device = fsp->subdev.device; + unsigned timeout_ms = 4000; + + do { + u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS); + + if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS) + return 0; + + usleep_range(1000, 2000); + } while (timeout_ms--); + + return -ETIMEDOUT; +} + +static const struct nvkm_fsp_func +gh100_fsp = { + .wait_secure_boot = gh100_fsp_wait_secure_boot, + .cot = { + .version = 1, + .size_hash = 48, + .size_pkey = 384, + .size_sig = 384, + .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc, + }, +}; + +int +gh100_fsp_new(struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp) +{ + return nvkm_fsp_new_(&gh100_fsp, device, type, inst, pfsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h new file mode 100644 index 000000000000..91517f3dedfb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __NVKM_FSP_PRIV_H__ +#define __NVKM_FSP_PRIV_H__ +#define nvkm_fsp(p) container_of((p), struct nvkm_fsp, subdev) +#include + +struct nvkm_fsp_func { + int (*wait_secure_boot)(struct nvkm_fsp *); + + struct { + u32 version; + u32 size_hash; + u32 size_pkey; + u32 size_sig; + int (*boot_gsp_fmc)(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); + } cot; +}; + +int nvkm_fsp_new_(const struct nvkm_fsp_func *, + struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); + +int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume, + u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index ba892c111c26..3c6c1309c4b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/gsp/tu102.o nvkm-y += nvkm/subdev/gsp/tu116.o nvkm-y += nvkm/subdev/gsp/ga100.o nvkm-y += nvkm/subdev/gsp/ga102.o +nvkm-y += nvkm/subdev/gsp/gh100.o nvkm-y += nvkm/subdev/gsp/ad102.o include $(src)/nvkm/subdev/gsp/rm/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 3a452349afde..d23243a83a4c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -83,6 +83,8 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev) void nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp) { + nvkm_firmware_put(gsp->fws.fmc); + gsp->fws.fmc = NULL; nvkm_firmware_put(gsp->fws.bl); gsp->fws.bl = NULL; nvkm_firmware_put(gsp->fws.booter.unload); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c new file mode 100644 index 000000000000..3ad71696c111 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include +#include + +#include +#include + +#include + +#include +#include +#include + +static int +gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend) +{ + struct nvkm_falcon *falcon = &gsp->falcon; + int ret, time = 4000; + + /* Shutdown RM. */ + ret = r535_gsp_fini(gsp, suspend); + if (ret && suspend) + return ret; + + /* Wait for RISC-V to halt. */ + do { + u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL); + + if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED)) + return 0; + + usleep_range(1000, 2000); + } while(time--); + + return -ETIMEDOUT; +} + +static bool +gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0) +{ + u32 data; + + /* Wait for GSP access via BAR0 to be allowed. */ + *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0); + + if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100) + return false; + + /* Check if an error code has been reported. */ + if (*mbox0) { + u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1); + + /* Any value that's not GSP_FMC_BOOT_PARAMS addr is an error. */ + if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr) + return true; + } + + /* Check if lockdown has been released. 
*/ + data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2); + return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN); +} + +static int +gh100_gsp_init(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + const bool resume = gsp->sr.meta.data != NULL; + struct nvkm_gsp_mem *meta; + GSP_FMC_BOOT_PARAMS *args; + int ret, time = 4000; + u32 mbox0; + + if (!resume) { + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args); + if (ret) + return ret; + + meta = &gsp->wpr_meta; + } else { + gsp->rm->api->gsp->set_rmargs(gsp, true); + meta = &gsp->sr.meta; + } + + args = gsp->fmc.args.data; + + args->bootGspRmParams.gspRmDescOffset = meta->addr; + args->bootGspRmParams.gspRmDescSize = meta->size; + args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM; + args->bootGspRmParams.bIsGspRmBoot = 1; + + args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM; + args->gspRmParams.bootArgsOffset = gsp->libos.addr; + + ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, gsp->fb.heap.size, resume, + gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig); + if (ret) + return ret; + + do { + if (gh100_gsp_lockdown_released(gsp, &mbox0)) + break; + + usleep_range(1000, 2000); + } while(time--); + + if (time < 0) { + nvkm_error(subdev, "GSP-FMC boot timed out\n"); + return -ETIMEDOUT; + } + + if (mbox0) { + nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0); + return -EIO; + } + + return r535_gsp_init(gsp); +} + +static int +gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp) +{ + GspFwWprMeta *meta; + int ret; + + ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta); + if (ret) + return ret; + + gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device); + gsp->fb.bios.vga_workspace.size = 128 * 1024; + gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr; + + meta = gsp->wpr_meta.data; + + meta->magic = GSP_FW_WPR_META_MAGIC; + meta->revision = GSP_FW_WPR_META_REVISION; + + meta->sizeOfRadix3Elf = gsp->fw.len; + meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; + + meta->sizeOfBootloader = gsp->boot.fw.size; + meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; + meta->bootloaderCodeOffset = gsp->boot.code_offset; + meta->bootloaderDataOffset = gsp->boot.data_offset; + meta->bootloaderManifestOffset = gsp->boot.manifest_offset; + + meta->sysmemAddrOfSignature = gsp->sig.addr; + meta->sizeOfSignature = gsp->sig.size; + + meta->nonWprHeapSize = gsp->fb.heap.size; + meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp); + meta->frtsSize = 0x100000; + meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; + meta->pmuReservedSize = 0; + return 0; +} + +/* The sh_flags value for the binary blobs in the ELF image */ +#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC) + +#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr)) +#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr)) + +/* The FMC ELF header must be exactly this */ +static const u8 elf_header[] = { + 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */ + 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */ + + ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */ + ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */ + 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, e_shentsize */ + + 6, 0, 1, 0, /* e_shnum, e_shstrndx */ +}; + +/** + * elf_validate_sections - validate each section in the FMC ELF image + * @elf: ELF image + * @length: 
size of the entire ELF image + */ +static bool +elf_validate_sections(const void *elf, size_t length) +{ + const struct elf32_hdr *ehdr = elf; + const struct elf32_shdr *shdr = elf + ehdr->e_shoff; + + /* The offset of the first section */ + Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize; + + if (section_begin > length) + return false; + + /* The first section header is the null section, so skip it */ + for (unsigned int i = 1; i < ehdr->e_shnum; i++) { + if (i == ehdr->e_shstrndx) { + if (shdr[i].sh_type != SHT_STRTAB) + return false; + if (shdr[i].sh_flags != SHF_STRINGS) + return false; + } else { + if (shdr[i].sh_type != SHT_PROGBITS) + return false; + if (shdr[i].sh_flags != FMC_SHF_FLAGS) + return false; + } + + /* Ensure that each section is inside the image */ + if (shdr[i].sh_offset < section_begin || + (u64)shdr[i].sh_offset + shdr[i].sh_size > length) + return false; + + /* Non-zero sh_info is a CRC */ + if (shdr[i].sh_info) { + /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */ + u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0; + + if (shdr[i].sh_info != crc32) + return false; + } + } + + return true; +} + +/** + * elf_section - return a pointer to the data for a given section + * @elf: ELF image + * @name: section name to search for + * @len: pointer to returned length of found section + */ +static const void * +elf_section(const void *elf, const char *name, unsigned int *len) +{ + const struct elf32_hdr *ehdr = elf; + const struct elf32_shdr *shdr = elf + ehdr->e_shoff; + const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset; + + for (unsigned int i = 1; i < ehdr->e_shnum; i++) { + if (!strcmp(&names[shdr[i].sh_name], name)) { + *len = shdr[i].sh_size; + return elf + shdr[i].sh_offset; + } + } + + return NULL; +} + +static int +gh100_gsp_oneinit(struct nvkm_gsp *gsp) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_fsp *fsp = device->fsp; + const void *fw = gsp->fws.fmc->data; + const void *hash, *sig, *pkey, *img; + unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0; + int ret; + + if (gsp->fws.fmc->size < ELF_HDR_SIZE || + memcmp(fw, elf_header, sizeof(elf_header)) || + !elf_validate_sections(fw, gsp->fws.fmc->size)) { + nvkm_error(subdev, "fmc firmware image is invalid\n"); + return -ENODATA; + } + + hash = elf_section(fw, "hash", &hash_len); + sig = elf_section(fw, "signature", &sig_len); + pkey = elf_section(fw, "publickey", &pkey_len); + img = elf_section(fw, "image", &img_len); + + if (!hash || !sig || !pkey || !img) { + nvkm_error(subdev, "fmc firmware image is invalid\n"); + return -ENODATA; + } + + if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len)) + return -EINVAL; + + /* Load GSP-FMC FW into memory. 
*/ + ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw); + if (ret) + return ret; + + memcpy(gsp->fmc.fw.data, img, img_len); + + gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL); + gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL); + gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL); + if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig) + return -ENOMEM; + + ret = r535_gsp_oneinit(gsp); + if (ret) + return ret; + + return gh100_gsp_wpr_meta_init(gsp); +} + +static const struct nvkm_gsp_func +gh100_gsp = { + .flcn = &ga102_gsp_flcn, + + .sig_section = ".fwsignature_gh100", + + .dtor = r535_gsp_dtor, + .oneinit = gh100_gsp_oneinit, + .init = gh100_gsp_init, + .fini = gh100_gsp_fini, + + .rm.gpu = &gh100_gpu, +}; + +static int +gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) +{ + int ret; + + ret = tu102_gsp_load_rm(gsp, fwif); + if (ret) + goto done; + + ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc); + +done: + if (ret) + nvkm_gsp_dtor_fws(gsp); + + return ret; +} + +static struct nvkm_gsp_fwif +gh100_gsps[] = { + { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true }, + {} +}; + +int +gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp); +} + +NVKM_GSP_FIRMWARE_FMC(gh100, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index c8429863b642..86ec580ba936 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -22,7 +22,9 @@ int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver, void nvkm_gsp_dtor_fws(struct nvkm_gsp *); int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); +int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *); #define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \ MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \ @@ -30,6 +32,11 @@ MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \ MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \ MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin") +#define NVKM_GSP_FIRMWARE_FMC(chip,vers) \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \ +MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin") + struct nvkm_gsp_func { const struct nvkm_falcon_func *flcn; const struct nvkm_falcon_fw_func *fwsec; @@ -60,6 +67,7 @@ int tu102_gsp_oneinit(struct nvkm_gsp *); int tu102_gsp_init(struct nvkm_gsp *); int tu102_gsp_fini(struct nvkm_gsp *, bool suspend); int tu102_gsp_reset(struct nvkm_gsp *); +u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *); extern const struct nvkm_falcon_func ga102_gsp_flcn; extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index d3f4e60bb131..2a71868d6710 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -11,6 +11,7 @@ nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o nvkm-y += nvkm/subdev/gsp/rm/ga100.o nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o nvkm-y += nvkm/subdev/gsp/rm/ad10x.o +nvkm-y += nvkm/subdev/gsp/rm/gh100.o include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild 
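elf_validate_sections() above checks each CRC-carrying section of the GSP-FMC image with crc32_le(~0, ...) ^ ~0, which is the standard CRC-32, so a firmware blob can also be sanity-checked offline before it ever reaches the kernel. Below is a minimal userspace sketch of that check; the tool name and build line are illustrative, and zlib's crc32() stands in for the kernel helper.

/*
 * Illustrative stand-alone checker, not part of the series: walks the
 * 32-bit section headers of a GSP-FMC image and verifies the CRC that
 * elf_validate_sections() expects in sh_info.  zlib's crc32(0, ...) is
 * equivalent to the kernel's crc32_le(~0, ...) ^ ~0 used above.
 *
 * Build (assumed): cc -o fmc-check fmc-check.c -lz
 */
#include <elf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>

int main(int argc, char **argv)
{
	unsigned char *buf;
	Elf32_Ehdr *ehdr;
	Elf32_Shdr *shdr;
	const char *names;
	unsigned int i;
	long len;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <gsp-fmc image>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f || fseek(f, 0, SEEK_END) || (len = ftell(f)) < (long)sizeof(*ehdr))
		return 1;
	rewind(f);

	buf = malloc(len);
	if (!buf || fread(buf, 1, len, f) != (size_t)len)
		return 1;
	fclose(f);

	ehdr = (Elf32_Ehdr *)buf;
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG))
		return 1;

	shdr = (Elf32_Shdr *)(buf + ehdr->e_shoff);
	names = (const char *)buf + shdr[ehdr->e_shstrndx].sh_offset;

	/* Section 0 is the null section; sections without sh_info carry no CRC. */
	for (i = 1; i < ehdr->e_shnum; i++) {
		uLong crc;

		if (!shdr[i].sh_info)
			continue;

		crc = crc32(0, buf + shdr[i].sh_offset, shdr[i].sh_size);
		printf("%-12s 0x%08lx %s\n", names + shdr[i].sh_name, crc,
		       crc == shdr[i].sh_info ? "ok" : "MISMATCH");
	}

	free(buf);
	return 0;
}

Only sections that record a CRC (non-zero sh_info) are listed; the section names correspond to the ones gh100_gsp_oneinit() looks up ("hash", "signature", "publickey", "image").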
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c new file mode 100644 index 000000000000..088250559e12 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "gpu.h" + +#include + +const struct nvkm_rm_gpu +gh100_gpu = { + .usermode.class = HOPPER_USERMODE_A, + + .fifo.chan = { + .class = HOPPER_CHANNEL_GPFIFO_A, + }, + + .ce.class = HOPPER_DMA_COPY_A, + .gr.class = { + .i2m = KEPLER_INLINE_TO_MEMORY_B, + .twod = FERMI_TWOD_A, + .threed = HOPPER_A, + .compute = HOPPER_COMPUTE_A, + }, + .nvdec.class = NVB8B0_VIDEO_DECODER, + .nvjpg.class = NVB8D1_VIDEO_NVJPG, + .ofa.class = NVB8FA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index a256be42ab6e..443753f3369a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -62,4 +62,5 @@ extern const struct nvkm_rm_gpu tu1xx_gpu; extern const struct nvkm_rm_gpu ga100_gpu; extern const struct nvkm_rm_gpu ga1xx_gpu; extern const struct nvkm_rm_gpu ad10x_gpu; +extern const struct nvkm_rm_gpu gh100_gpu; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index fe00425c5479..baf42339f93e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -2073,6 +2073,12 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) nvkm_falcon_fw_dtor(&gsp->booter.unload); nvkm_falcon_fw_dtor(&gsp->booter.load); + nvkm_gsp_mem_dtor(&gsp->fmc.args); + kfree(gsp->fmc.sig); + kfree(gsp->fmc.pkey); + kfree(gsp->fmc.hash); + nvkm_gsp_mem_dtor(&gsp->fmc.fw); + mutex_destroy(&gsp->msgq.mutex); mutex_destroy(&gsp->cmdq.mutex); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c index 730dcb645cca..9d2fa4e66d59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c @@ -26,8 +26,10 @@ r570_gsp_sr_data_size(struct nvkm_gsp *gsp) static void r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp) { - if (gsp->subdev.debug < NV_DBG_DEBUG) + if (gsp->subdev.debug < NV_DBG_DEBUG) { r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL); + r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL); + } } static bool @@ -102,6 +104,13 @@ r570_gsp_get_static_info(struct nvkm_gsp *gsp) r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams); + if (gsp->rm->wpr->offset_set_by_acr) { + GspFwWprMeta *meta = gsp->wpr_meta.data; + + meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset; + meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset; + } + nvkm_gsp_rpc_done(gsp, rpc); ret = r570_gr_gpc_mask(gsp, &gpc_mask); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h index 4685a898fac6..b6075021e74f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h @@ -573,4 +573,62 @@ typedef struct (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \ (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA)) +typedef struct GSP_FMC_INIT_PARAMS +{ + 
// CC initialization "registry keys" + NvU32 regkeys; +} GSP_FMC_INIT_PARAMS; + +typedef enum { + GSP_DMA_TARGET_LOCAL_FB, + GSP_DMA_TARGET_COHERENT_SYSTEM, + GSP_DMA_TARGET_NONCOHERENT_SYSTEM, + GSP_DMA_TARGET_COUNT +} GSP_DMA_TARGET; + +typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS +{ + // Physical memory aperture through which gspRmDescPa is accessed + GSP_DMA_TARGET target; + // Size in bytes of the GSP-RM descriptor structure + NvU32 gspRmDescSize; + // Physical offset in the target aperture of the GSP-RM descriptor structure + NvU64 gspRmDescOffset; + // Physical offset in FB to set the start of the WPR containing GSP-RM + NvU64 wprCarveoutOffset; + // Size in bytes of the WPR containing GSP-RM + NvU32 wprCarveoutSize; + // Whether to boot GSP-RM or GSP-Proxy through ACR + NvBool bIsGspRmBoot; +} GSP_ACR_BOOT_GSP_RM_PARAMS; + +typedef struct GSP_RM_PARAMS +{ + // Physical memory aperture through which bootArgsOffset is accessed + GSP_DMA_TARGET target; + // Physical offset in the memory aperture that will be passed to GSP-RM + NvU64 bootArgsOffset; +} GSP_RM_PARAMS; + +typedef struct GSP_SPDM_PARAMS +{ + // Physical Memory Aperture through which all addresses are accessed + GSP_DMA_TARGET target; + + // Physical offset in the memory aperture where SPDM payload is stored + NvU64 payloadBufferOffset; + + // Size of the above payload buffer + NvU32 payloadBufferSize; +} GSP_SPDM_PARAMS; + +typedef struct GSP_FMC_BOOT_PARAMS +{ + GSP_FMC_INIT_PARAMS initParams; + GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams; + GSP_RM_PARAMS gspRmParams; + GSP_SPDM_PARAMS gspSpdmParams; +} GSP_FMC_BOOT_PARAMS; + +#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+ #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c index ad80d8a3d6d3..8a641e5a5b92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c @@ -20,6 +20,15 @@ r570_wpr_libos3 = { .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, }; +static const struct nvkm_rm_wpr +r570_wpr_libos3_gh100 = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + .heap_size_non_wpr = 0x200000, + .offset_set_by_acr = true, +}; + static const struct nvkm_rm_api r570_api = { .gsp = &r570_gsp, @@ -50,3 +59,9 @@ r570_rm_ga102 = { .wpr = &r570_wpr_libos3, .api = &r570_api, }; + +const struct nvkm_rm_impl +r570_rm_gh100 = { + .wpr = &r570_wpr_libos3_gh100, + .api = &r570_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index ead48c106bb6..626ebce39be5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -25,6 +25,8 @@ struct nvkm_rm_wpr { u32 os_carveout_size; u32 base_size; u64 heap_size_min; + u32 heap_size_non_wpr; + bool offset_set_by_acr; }; struct nvkm_rm_api { @@ -173,6 +175,7 @@ extern const struct nvkm_rm_api_engine r535_ofa; extern const struct nvkm_rm_impl r570_rm_tu102; extern const struct nvkm_rm_impl r570_rm_ga102; +extern const struct nvkm_rm_impl r570_rm_gh100; extern const struct nvkm_rm_api_gsp r570_gsp; extern const struct nvkm_rm_api_client r570_client; extern const struct nvkm_rm_api_fbsr r570_fbsr; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index 
97c02aa93d55..58e233bc53b1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -246,7 +246,7 @@ tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp) return 0; } -static u64 +u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp) { u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30); @@ -379,7 +379,7 @@ tu102_gsp = { .rm.gpu = &tu1xx_gpu, }; -static int +int tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif) { struct nvkm_subdev *subdev = &gsp->subdev; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild index 06cbe19ce376..fa7a2862dd1f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild @@ -4,3 +4,4 @@ nvkm-y += nvkm/subdev/instmem/nv04.o nvkm-y += nvkm/subdev/instmem/nv40.o nvkm-y += nvkm/subdev/instmem/nv50.o nvkm-y += nvkm/subdev/instmem/gk20a.o +nvkm-y += nvkm/subdev/instmem/gh100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c new file mode 100644 index 000000000000..8d8dd5f8a6c7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include + +static void +gh100_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr) +{ + nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW, addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT); +} + +static const struct nvkm_instmem_func +gh100_instmem = { + .fini = nv50_instmem_fini, + .memory_new = nv50_instobj_new, + .memory_wrap = nv50_instobj_wrap, + .set_bar0_window_addr = gh100_instmem_set_bar0_window_addr, +}; + +int +gh100_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_instmem **pimem) +{ + return r535_instmem_new(&gh100_instmem, device, type, inst, pimem); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index 9d29e5234734..4ca6fb30743d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -353,7 +353,7 @@ nv50_instobj_func = { .map = nv50_instobj_map, }; -static int +int nv50_instobj_wrap(struct nvkm_instmem *base, struct nvkm_memory *memory, struct nvkm_memory **pmemory) { @@ -373,7 +373,7 @@ nv50_instobj_wrap(struct nvkm_instmem *base, return 0; } -static int +int nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, struct nvkm_memory **pmemory) { @@ -400,7 +400,7 @@ nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr) nvkm_wr32(device, 0x001700, addr >> 16); } -static void +void nv50_instmem_fini(struct nvkm_instmem *base) { nv50_instmem(base)->addr = ~0ULL; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h index d5b5fcd9262b..87bbdd786eaa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -21,6 +21,11 @@ struct nvkm_instmem_func { int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); +void nv50_instmem_fini(struct nvkm_instmem *); +int nv50_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero, + struct nvkm_memory **); +int nv50_instobj_wrap(struct 
nvkm_instmem *, struct nvkm_memory *vram, + struct nvkm_memory **bar2); void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild index a602b0cb5b31..ea4848931540 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -15,6 +15,7 @@ nvkm-y += nvkm/subdev/mmu/gp100.o nvkm-y += nvkm/subdev/mmu/gp10b.o nvkm-y += nvkm/subdev/mmu/gv100.o nvkm-y += nvkm/subdev/mmu/tu102.o +nvkm-y += nvkm/subdev/mmu/gh100.o nvkm-y += nvkm/subdev/mmu/mem.o nvkm-y += nvkm/subdev/mmu/memnv04.o @@ -36,6 +37,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgp100.o nvkm-y += nvkm/subdev/mmu/vmmgp10b.o nvkm-y += nvkm/subdev/mmu/vmmgv100.o nvkm-y += nvkm/subdev/mmu/vmmtu102.o +nvkm-y += nvkm/subdev/mmu/vmmgh100.o nvkm-y += nvkm/subdev/mmu/umem.o nvkm-y += nvkm/subdev/mmu/ummu.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c new file mode 100644 index 000000000000..2918fb32cc91 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "mem.h" +#include "vmm.h" + +#include + +static const struct nvkm_mmu_func +gh100_mmu = { + .dma_bits = 52, + .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}}, + .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map }, + .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gh100_vmm_new }, + .kind = tu102_mmu_kind, + .kind_sys = true, +}; + +int +gh100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) +{ + return r535_mmu_new(&gh100_mmu, device, type, inst, pmmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index e9ca6537778c..90efef8f0b54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -53,6 +53,8 @@ const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *); +const u8 *tu102_mmu_kind(struct nvkm_mmu *, int *, u8 *); + struct nvkm_mmu_pt { union { struct nvkm_mmu_ptc *ptc; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index df662ce4a4b0..7acff3642e20 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -28,7 +28,7 @@ #include -static const u8 * +const u8 * tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) { static const u8 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h index f9bc30cdb2b3..4586a425dbe4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h @@ -143,6 +143,8 @@ struct nvkm_vmm_func { int (*aper)(enum nvkm_memory_target); int (*valid)(struct nvkm_vmm *, void *argv, u32 argc, struct nvkm_vmm_map *); + int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp, + struct nvkm_vmm_map *); void (*flush)(struct nvkm_vmm *, int depth); int (*mthd)(struct nvkm_vmm *, struct nvkm_client *, @@ -254,6 +256,8 @@ void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr); int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *); +void 
tu102_vmm_flush(struct nvkm_vmm *, int depth); + int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, @@ -296,6 +300,9 @@ int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); +int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32, + struct lock_class_key *, const char *, + struct nvkm_vmm **); #define VMM_PRINT(l,v,p,f,a...) do { \ struct nvkm_vmm *_vmm = (v); \ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c new file mode 100644 index 000000000000..5614df3432da --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "vmm.h" + +#include + +#include +#include + +static inline void +gh100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes, + struct nvkm_vmm_map *map, u64 addr) +{ + u64 data = addr | map->type; + + while (ptes--) { + VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data); + data += map->next; + } +} + +static void +gh100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes, + struct nvkm_vmm_map *map) +{ + VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte); +} + +static void +gh100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes, + struct nvkm_vmm_map *map) +{ + if (map->page->shift == PAGE_SHIFT) { + VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes); + + nvkm_kmap(pt->memory); + while (ptes--) { + const u64 data = *map->dma++ | map->type; + + VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data); + } + nvkm_done(pt->memory); + return; + } + + VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte); +} + +static void +gh100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes, + struct nvkm_vmm_map *map) +{ + VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte); +} + +static void +gh100_vmm_pgt_sparse(struct nvkm_vmm *vmm, + struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes) +{ + const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, SPARSE); + + VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes); +} + +static const struct nvkm_vmm_desc_func +gh100_vmm_desc_spt = { + .unmap = gf100_vmm_pgt_unmap, + .sparse = gh100_vmm_pgt_sparse, + .mem = gh100_vmm_pgt_mem, + .dma = gh100_vmm_pgt_dma, + .sgl = gh100_vmm_pgt_sgl, +}; + +static void +gh100_vmm_lpt_invalid(struct nvkm_vmm *vmm, + struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes) +{ + const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, NO_VALID_4KB_PAGE); + + VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes); +} + +static const struct nvkm_vmm_desc_func +gh100_vmm_desc_lpt = { + .invalid = gh100_vmm_lpt_invalid, + .unmap = gf100_vmm_pgt_unmap, + .sparse = gh100_vmm_pgt_sparse, + .mem = gh100_vmm_pgt_mem, +}; + +static inline void +gh100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, + u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) +{ + u64 data = addr | map->type; + + while (ptes--) { + VMM_WO128(pt, vmm, ptei++ * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL); + data += map->next; + } +} + +static void +gh100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, + u32 ptei, u32 ptes, struct 
nvkm_vmm_map *map) +{ + VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte); +} + +static inline bool +gh100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data) +{ + switch (nvkm_memory_target(pt->memory)) { + case NVKM_MEM_TARGET_VRAM: + *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, VIDEO_MEMORY); + *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_NOT_ALLOWED); + break; + case NVKM_MEM_TARGET_HOST: + *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_COHERENT_MEMORY); + *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_UNCACHED_ATS_ALLOWED); + break; + case NVKM_MEM_TARGET_NCOH: + *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_NON_COHERENT_MEMORY); + *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_ALLOWED); + break; + default: + WARN_ON(1); + return false; + } + + *data |= pt->addr; + return true; +} + +static void +gh100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei) +{ + struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; + struct nvkm_mmu_pt *pd = pgd->pt[0]; + u64 data[2] = {}; + + if (pgt->pt[0] && !gh100_vmm_pde(pgt->pt[0], &data[0])) + return; + if (pgt->pt[1] && !gh100_vmm_pde(pgt->pt[1], &data[1])) + return; + + nvkm_kmap(pd->memory); + VMM_WO128(pd, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data[0], data[1]); + nvkm_done(pd->memory); +} + +static void +gh100_vmm_pd0_sparse(struct nvkm_vmm *vmm, + struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes) +{ + const u64 data = NVDEF(NV_MMU, VER3_DUAL_PDE, PCF_BIG, SPARSE_ATS_ALLOWED); + + VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL, pdes); +} + +static void +gh100_vmm_pd0_unmap(struct nvkm_vmm *vmm, + struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes) +{ + VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, 0ULL, 0ULL, pdes); +} + +static const struct nvkm_vmm_desc_func +gh100_vmm_desc_pd0 = { + .unmap = gh100_vmm_pd0_unmap, + .sparse = gh100_vmm_pd0_sparse, + .pde = gh100_vmm_pd0_pde, + .mem = gh100_vmm_pd0_mem, +}; + +static void +gh100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei) +{ + struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; + struct nvkm_mmu_pt *pd = pgd->pt[0]; + u64 data = 0; + + if (!gh100_vmm_pde(pgt->pt[0], &data)) + return; + + nvkm_kmap(pd->memory); + VMM_WO064(pd, vmm, pdei * NV_MMU_VER3_PDE__SIZE, data); + nvkm_done(pd->memory); +} + +static const struct nvkm_vmm_desc_func +gh100_vmm_desc_pd1 = { + .unmap = gf100_vmm_pgt_unmap, + .sparse = gh100_vmm_pgt_sparse, + .pde = gh100_vmm_pd1_pde, +}; + +static const struct nvkm_vmm_desc +gh100_vmm_desc_16[] = { + { LPT, 5, 8, 0x0100, &gh100_vmm_desc_lpt }, + { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 }, + { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 }, + { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 }, + { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 }, + { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 }, + {} +}; + +static const struct nvkm_vmm_desc +gh100_vmm_desc_12[] = { + { SPT, 9, 8, 0x1000, &gh100_vmm_desc_spt }, + { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 }, + { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 }, + { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 }, + { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 }, + { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 }, + {} +}; + +static int +gh100_vmm_valid(struct nvkm_vmm *vmm, bool ro, bool priv, u8 kind, u8 comp, + struct nvkm_vmm_map *map) +{ + const enum nvkm_memory_target target = nvkm_memory_target(map->memory); + const bool vol = target == NVKM_MEM_TARGET_HOST; + const struct nvkm_vmm_page *page = map->page; + u8 kind_inv, pcf; + int kindn, aper; + const u8 *kindm; + + map->next = 1ULL << page->shift; + map->type = 0; 
+ + aper = vmm->func->aper(target); + if (WARN_ON(aper < 0)) + return aper; + + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); + if (kind >= kindn || kindm[kind] == kind_inv) { + VMM_DEBUG(vmm, "kind %02x", kind); + return -EINVAL; + } + + if (priv) { + if (ro) { + if (vol) + pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD; + else + pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD; + } else { + if (vol) + pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD; + else + pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD; + } + } else { + if (ro) { + if (vol) + pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD; + else + pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD; + } else { + if (vol) + pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD; + else + pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD; + } + } + + map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE); + map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper); + map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf); + map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind); + return 0; +} + +static const struct nvkm_vmm_func +gh100_vmm = { + .join = gv100_vmm_join, + .part = gf100_vmm_part, + .aper = gf100_vmm_aper, + .valid = gp100_vmm_valid, + .valid2 = gh100_vmm_valid, + .flush = tu102_vmm_flush, + .page = { + { 56, &gh100_vmm_desc_16[5], NVKM_VMM_PAGE_Sxxx }, + { 47, &gh100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx }, + { 38, &gh100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx }, + { 29, &gh100_vmm_desc_16[2], NVKM_VMM_PAGE_SVxC }, + { 21, &gh100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC }, + { 16, &gh100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC }, + { 12, &gh100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx }, + {} + } +}; + +int +gh100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, + void *argv, u32 argc, struct lock_class_key *key, + const char *name, struct nvkm_vmm **pvmm) +{ + return gp100_vmm_new_(&gh100_vmm, mmu, managed, addr, size, + argv, argc, key, name, pvmm); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index bddac77f48f0..851fd847a2a9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -436,6 +436,9 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, return ret; } + if (vmm->func->valid2) + return vmm->func->valid2(vmm, ro, priv, kind, 0, map); + aper = vmm->func->aper(target); if (WARN_ON(aper < 0)) return aper; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c index 8379e72d77ab..4b30eab40bba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c @@ -23,7 +23,7 @@ #include -static void +void tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) { struct nvkm_device *device = vmm->mmu->subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild index 174bdf995271..a14ea0f7b1c8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild @@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/pci/gf100.o nvkm-y += nvkm/subdev/pci/gf106.o nvkm-y += nvkm/subdev/pci/gk104.o nvkm-y += nvkm/subdev/pci/gp100.o +nvkm-y += nvkm/subdev/pci/gh100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c new file mode 100644 index 000000000000..42da92d7a5fe --- 
/dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include +#include + +static void +gh100_pci_msi_rearm(struct nvkm_pci *pci) +{ + /* Handled by top-level intr ACK. */ +} + +static const struct nvkm_pci_func +gh100_pci = { + .cfg = { + .addr = DRF_LO(NV_EP_PCFGM), + .size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1, + }, + .msi_rearm = gh100_pci_msi_rearm, +}; + +int +gh100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) +{ + return nvkm_pci_new_(&gh100_pci, device, type, inst, ppci); +} -- cgit v1.2.3 From 627664de4b8e908b0e9073031dd5e0b6d565e759 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 11 Mar 2025 18:21:19 +1000 Subject: drm/nouveau: add helper functions for allocating pinned/cpu-mapped bos Replace some awkward sequences that are repeated in a number of places with helper functions. Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 22 +++---------- drivers/gpu/drm/nouveau/dispnv50/disp.c | 20 ++---------- drivers/gpu/drm/nouveau/nouveau_bo.c | 55 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_bo.h | 5 +++ drivers/gpu/drm/nouveau/nouveau_chan.c | 14 ++------- drivers/gpu/drm/nouveau/nouveau_dmem.c | 18 +++-------- drivers/gpu/drm/nouveau/nv10_fence.c | 6 ++-- drivers/gpu/drm/nouveau/nv17_fence.c | 15 +-------- drivers/gpu/drm/nouveau/nv50_fence.c | 15 +-------- drivers/gpu/drm/nouveau/nv84_fence.c | 19 ++---------- 10 files changed, 81 insertions(+), 108 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 67146f1e8482..c063756eaea3 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -768,9 +768,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) disp->image[nv_crtc->index] = NULL; } - nouveau_bo_unmap(nv_crtc->cursor.nvbo); - nouveau_bo_unpin(nv_crtc->cursor.nvbo); - nouveau_bo_fini(nv_crtc->cursor.nvbo); + nouveau_bo_unpin_del(&nv_crtc->cursor.nvbo); nvif_event_dtor(&nv_crtc->vblank); nvif_head_dtor(&nv_crtc->head); kfree(nv_crtc); @@ -1303,6 +1301,7 @@ nv04_crtc_vblank_handler(struct nvif_event *event, void *repv, u32 repc) int nv04_crtc_create(struct drm_device *dev, int crtc_num) { + struct nouveau_cli *cli = &nouveau_drm(dev)->client; struct nouveau_display *disp = nouveau_display(dev); struct nouveau_crtc *nv_crtc; struct drm_plane *primary; @@ -1336,20 +1335,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); - ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100, - NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, - &nv_crtc->cursor.nvbo); - if (!ret) { - ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, - NOUVEAU_GEM_DOMAIN_VRAM, false); - if (!ret) { - ret = nouveau_bo_map(nv_crtc->cursor.nvbo); - if (ret) - nouveau_bo_unpin(nv_crtc->cursor.nvbo); - } - if (ret) - nouveau_bo_fini(nv_crtc->cursor.nvbo); - } + ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_VRAM, 64 * 64 * 4, &nv_crtc->cursor.nvbo); + if (ret) + return ret; nv04_cursor_init(nv_crtc); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 9bed728cb00e..10485510b539 100644 
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2808,10 +2808,7 @@ nv50_display_destroy(struct drm_device *dev) nvif_object_dtor(&disp->caps); nv50_core_del(&disp->core); - nouveau_bo_unmap(disp->sync); - if (disp->sync) - nouveau_bo_unpin(disp->sync); - nouveau_bo_fini(disp->sync); + nouveau_bo_unpin_del(&disp->sync); nouveau_display(dev)->priv = NULL; kfree(disp); @@ -2843,20 +2840,7 @@ nv50_display_create(struct drm_device *dev) dev->mode_config.normalize_zpos = true; /* small shared memory area we use for notifiers and semaphores */ - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, - NOUVEAU_GEM_DOMAIN_VRAM, - 0, 0x0000, NULL, NULL, &disp->sync); - if (!ret) { - ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true); - if (!ret) { - ret = nouveau_bo_map(disp->sync); - if (ret) - nouveau_bo_unpin(disp->sync); - } - if (ret) - nouveau_bo_fini(disp->sync); - } - + ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &disp->sync); if (ret) goto out; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index fbe0144927e8..3a5ddf60380e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -401,6 +401,61 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, return 0; } +void +nouveau_bo_unpin_del(struct nouveau_bo **pnvbo) +{ + struct nouveau_bo *nvbo = *pnvbo; + + if (!nvbo) + return; + + nouveau_bo_unmap(nvbo); + nouveau_bo_unpin(nvbo); + nouveau_bo_fini(nvbo); + + *pnvbo = NULL; +} + +int +nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo) +{ + struct nouveau_bo *nvbo; + int ret; + + ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo); + if (ret) + return ret; + + ret = nouveau_bo_pin(nvbo, domain, false); + if (ret) { + nouveau_bo_fini(nvbo); + return ret; + } + + *pnvbo = nvbo; + return 0; +} + +int +nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo) +{ + struct nouveau_bo *nvbo; + int ret; + + ret = nouveau_bo_new_pin(cli, domain, size, &nvbo); + if (ret) + return ret; + + ret = nouveau_bo_map(nvbo); + if (ret) { + nouveau_bo_unpin_del(&nvbo); + return ret; + } + + *pnvbo = nvbo; + return 0; +} + static void set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 596a63a50a20..f402f14bebb0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -9,6 +9,7 @@ struct nouveau_channel; struct nouveau_cli; struct nouveau_drm; struct nouveau_fence; +struct nouveau_vma; struct nouveau_bo { struct ttm_buffer_object bo; @@ -89,6 +90,10 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo); void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo); void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo); +int nouveau_bo_new_pin(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **); +int nouveau_bo_new_map(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **); +void nouveau_bo_unpin_del(struct nouveau_bo **); + /* TODO: submit equivalent to TTM generic API upstream? 
*/ static inline void __iomem * nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo) diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index a14aa6715bb9..4b4bbbd8d7b7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -105,10 +105,7 @@ nouveau_channel_del(struct nouveau_channel **pchan) nvif_mem_dtor(&chan->mem_userd); nvif_object_dtor(&chan->push.ctxdma); nouveau_vma_del(&chan->push.vma); - nouveau_bo_unmap(chan->push.buffer); - if (chan->push.buffer && chan->push.buffer->bo.pin_count) - nouveau_bo_unpin(chan->push.buffer); - nouveau_bo_fini(chan->push.buffer); + nouveau_bo_unpin_del(&chan->push.buffer); kfree(chan); } *pchan = NULL; @@ -163,14 +160,7 @@ nouveau_channel_prep(struct nouveau_cli *cli, if (nouveau_vram_pushbuf) target = NOUVEAU_GEM_DOMAIN_VRAM; - ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL, - &chan->push.buffer); - if (ret == 0) { - ret = nouveau_bo_pin(chan->push.buffer, target, false); - if (ret == 0) - ret = nouveau_bo_map(chan->push.buffer); - } - + ret = nouveau_bo_new_map(cli, target, size, &chan->push.buffer); if (ret) { nouveau_channel_del(pchan); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 61d0f411ef84..ca4932a150e3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -256,20 +256,15 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) chunk->pagemap.ops = &nouveau_dmem_pagemap_ops; chunk->pagemap.owner = drm->dev; - ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0, - NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL, - &chunk->bo); + ret = nouveau_bo_new_pin(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, DMEM_CHUNK_SIZE, + &chunk->bo); if (ret) goto out_release; - ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); - if (ret) - goto out_bo_free; - ptr = memremap_pages(&chunk->pagemap, numa_node_id()); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); - goto out_bo_unpin; + goto out_bo_free; } mutex_lock(&drm->dmem->mutex); @@ -292,10 +287,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) return 0; -out_bo_unpin: - nouveau_bo_unpin(chunk->bo); out_bo_free: - nouveau_bo_fini(chunk->bo); + nouveau_bo_unpin_del(&chunk->bo); out_release: release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range)); out_free: @@ -426,8 +419,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm) list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { nouveau_dmem_evict_chunk(chunk); - nouveau_bo_unpin(chunk->bo); - nouveau_bo_fini(chunk->bo); + nouveau_bo_unpin_del(&chunk->bo); WARN_ON(chunk->callocated); list_del(&chunk->list); memunmap_pages(&chunk->pagemap); diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 8c73f40e3bda..40ee95340814 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c @@ -85,10 +85,8 @@ void nv10_fence_destroy(struct nouveau_drm *drm) { struct nv10_fence_priv *priv = drm->fence; - nouveau_bo_unmap(priv->bo); - if (priv->bo) - nouveau_bo_unpin(priv->bo); - nouveau_bo_fini(priv->bo); + + nouveau_bo_unpin_del(&priv->bo); drm->fence = NULL; kfree(priv); } diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index d09bfd11369f..1b0c0aa3c305 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -130,20 +130,7 @@ nv17_fence_create(struct 
nouveau_drm *drm) priv->base.context_del = nv10_fence_context_del; spin_lock_init(&priv->lock); - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, - NOUVEAU_GEM_DOMAIN_VRAM, - 0, 0x0000, NULL, NULL, &priv->bo); - if (!ret) { - ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); - if (!ret) { - ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } - if (ret) - nouveau_bo_fini(priv->bo); - } - + ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo); if (ret) { nv10_fence_destroy(drm); return ret; diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 62e28dddf87c..e1f0e8adf313 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -81,20 +81,7 @@ nv50_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv10_fence_context_del; spin_lock_init(&priv->lock); - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, - NOUVEAU_GEM_DOMAIN_VRAM, - 0, 0x0000, NULL, NULL, &priv->bo); - if (!ret) { - ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); - if (!ret) { - ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } - if (ret) - nouveau_bo_fini(priv->bo); - } - + ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo); if (ret) { nv10_fence_destroy(drm); return ret; diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index aa7dd0c5d917..1765b2cedaf9 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -185,10 +185,8 @@ static void nv84_fence_destroy(struct nouveau_drm *drm) { struct nv84_fence_priv *priv = drm->fence; - nouveau_bo_unmap(priv->bo); - if (priv->bo) - nouveau_bo_unpin(priv->bo); - nouveau_bo_fini(priv->bo); + + nouveau_bo_unpin_del(&priv->bo); drm->fence = NULL; kfree(priv); } @@ -222,19 +220,8 @@ nv84_fence_create(struct nouveau_drm *drm) * will lose CPU/GPU coherency! */ NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT; - ret = nouveau_bo_new(&drm->client, 16 * drm->chan_total, 0, - domain, 0, 0, NULL, NULL, &priv->bo); - if (ret == 0) { - ret = nouveau_bo_pin(priv->bo, domain, false); - if (ret == 0) { - ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } - if (ret) - nouveau_bo_fini(priv->bo); - } + ret = nouveau_bo_new_map(&drm->client, domain, 16 * drm->chan_total, &priv->bo); if (ret) nv84_fence_destroy(drm); return ret; -- cgit v1.2.3 From d1fb887a08d8a21284e9a4be476f103353a32e7a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 19 Jun 2024 14:15:22 +1000 Subject: drm/nouveau/nv50-: separate CHANNEL_GPFIFO handling out from CHANNEL_DMA Primarily a cleanup to allow for changes in newer CHANNEL_GPFIFO classes to be more easily implemented. Compared to the prior implementation, this submits userspace push buffer segments as subroutines and uses the NV_RAMUSERD_TOP_LEVEL_GET registers to track the main (kernel) push buffer progress. Fixes a number of sporadic failures seen during piglit runs. 
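For illustration only (a sketch, not part of the patch): with the new nvif_chan interface, a submission path is expected to reserve space once, push each userspace segment as a subroutine, and leave kernel-side methods to the main push buffer. The helper name submit_pushes(), the push[]/nr_push parameters, and the hard-coded no_prefetch value below are placeholders; the real wiring is in the nouveau_exec_job_run() changes later in this patch.

    /* Sketch: submit userspace segments as subroutines, reserving one extra
     * GPFIFO entry and 16 dwords of main push buffer for kernel-side work
     * (e.g. fence emission). */
    static int
    submit_pushes(struct nouveau_channel *chan,
                  const struct drm_nouveau_exec_push *push, u32 nr_push)
    {
            u32 i;
            int ret;

            ret = nvif_chan_gpfifo_wait(&chan->chan, nr_push + 1, 16);
            if (ret)
                    return ret;

            for (i = 0; i < nr_push; i++)
                    nvif_chan_gpfifo_push(&chan->chan, push[i].va,
                                          push[i].va_len, false);

            /* Kernel methods still go through chan->chan.push; PUSH_KICK()
             * submits them as the "main" segment, whose progress is tracked
             * via NV_RAMUSERD_TOP_LEVEL_GET. */
            return 0;
    }
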
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvif/chan.h | 56 ++++++++++++ drivers/gpu/drm/nouveau/include/nvif/object.h | 2 +- drivers/gpu/drm/nouveau/include/nvif/push.h | 14 ++- drivers/gpu/drm/nouveau/nouveau_abi16.c | 2 +- drivers/gpu/drm/nouveau/nouveau_chan.c | 31 +++---- drivers/gpu/drm/nouveau/nouveau_chan.h | 11 +-- drivers/gpu/drm/nouveau/nouveau_dma.c | 103 +-------------------- drivers/gpu/drm/nouveau/nouveau_dma.h | 13 +-- drivers/gpu/drm/nouveau/nouveau_exec.c | 10 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 8 +- drivers/gpu/drm/nouveau/nvif/Kbuild | 5 + drivers/gpu/drm/nouveau/nvif/chan.c | 127 ++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nvif/chan506f.c | 72 +++++++++++++++ drivers/gpu/drm/nouveau/nvif/chanc36f.c | 39 ++++++++ 14 files changed, 344 insertions(+), 149 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/include/nvif/chan.h create mode 100644 drivers/gpu/drm/nouveau/nvif/chan.c create mode 100644 drivers/gpu/drm/nouveau/nvif/chan506f.c create mode 100644 drivers/gpu/drm/nouveau/nvif/chanc36f.c diff --git a/drivers/gpu/drm/nouveau/include/nvif/chan.h b/drivers/gpu/drm/nouveau/include/nvif/chan.h new file mode 100644 index 000000000000..c1f7a8fce05b --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvif/chan.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __NVIF_CHAN_H__ +#define __NVIF_CHAN_H__ +#include "push.h" + +struct nvif_chan { + const struct nvif_chan_func { + struct { + u32 (*read_get)(struct nvif_chan *); + } push; + + struct { + u32 (*read_get)(struct nvif_chan *); + void (*push)(struct nvif_chan *, bool main, u64 addr, u32 size, + bool no_prefetch); + void (*kick)(struct nvif_chan *); + } gpfifo; + } *func; + + struct { + struct nvif_map map; + } userd; + + struct { + struct nvif_map map; + u32 cur; + u32 max; + int free; + } gpfifo; + + struct nvif_push push; + + struct nvif_user *usermode; + u32 doorbell_token; +}; + +int nvif_chan_dma_wait(struct nvif_chan *, u32 push_nr); + +void nvif_chan_gpfifo_ctor(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, struct nvif_chan *); +int nvif_chan_gpfifo_wait(struct nvif_chan *, u32 gpfifo_nr, u32 push_nr); +void nvif_chan_gpfifo_push(struct nvif_chan *, u64 addr, u32 size, bool no_prefetch); + +int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size); +u32 nvif_chan506f_read_get(struct nvif_chan *); +u32 nvif_chan506f_gpfifo_read_get(struct nvif_chan *); +void nvif_chan506f_gpfifo_push(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch); + +int nvif_chanc36f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, + struct nvif_user *usermode, u32 doorbell_token); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h index 8d205b6af46a..1b32dc701f61 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/object.h +++ b/drivers/gpu/drm/nouveau/include/nvif/object.h @@ -16,7 +16,7 @@ struct nvif_object { u32 handle; s32 oclass; void *priv; /*XXX: hack */ - struct { + struct nvif_map { void __iomem *ptr; u64 size; } map; diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h index 
6d3a8a3d2087..a493fababe3c 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/push.h +++ b/drivers/gpu/drm/nouveau/include/nvif/push.h @@ -31,6 +31,12 @@ struct nvif_push { void (*kick)(struct nvif_push *push); struct nvif_mem mem; + u64 addr; + + struct { + u32 get; + u32 max; + } hw; u32 *bgn; u32 *cur; @@ -41,7 +47,7 @@ struct nvif_push { static inline __must_check int PUSH_WAIT(struct nvif_push *push, u32 size) { - if (push->cur + size >= push->end) { + if (push->cur + size > push->end) { int ret = push->wait(push, size); if (ret) return ret; @@ -55,7 +61,11 @@ PUSH_WAIT(struct nvif_push *push, u32 size) static inline int PUSH_KICK(struct nvif_push *push) { - push->kick(push); + if (push->cur != push->bgn) { + push->kick(push); + push->bgn = push->cur; + } + return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 4c100005ef81..a3ba07fc48a0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -416,7 +416,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) */ if (nouveau_cli_uvmm(cli)) { ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq, - chan->chan->dma.ib_max); + chan->chan->chan.gpfifo.max); if (ret) goto done; } diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 4b4bbbd8d7b7..ad1e99184f7a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -424,25 +424,24 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) } /* initialise dma tracking parameters */ - switch (chan->user.oclass) { - case NV03_CHANNEL_DMA: - case NV10_CHANNEL_DMA: - case NV17_CHANNEL_DMA: - case NV40_CHANNEL_DMA: + if (chan->user.oclass < NV50_CHANNEL_GPFIFO) { chan->user_put = 0x40; chan->user_get = 0x44; chan->dma.max = (0x10000 / 4) - 2; - break; - default: - chan->user_put = 0x40; - chan->user_get = 0x44; - chan->user_get_hi = 0x60; - chan->dma.ib_base = 0x10000 / 4; - chan->dma.ib_max = NV50_DMA_IB_MAX; - chan->dma.ib_put = 0; - chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; - chan->dma.max = chan->dma.ib_base; - break; + } else + if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) { + ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr, + (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000, + chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000); + if (ret) + return ret; + } else { + ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr, + (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000, + chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000, + &drm->client.device.user, chan->token); + if (ret) + return ret; } chan->dma.put = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index 016f668c0bc1..ea8c3cdab46f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h @@ -3,13 +3,11 @@ #define __NOUVEAU_CHAN_H__ #include #include -#include +#include struct nvif_device; struct nouveau_channel { - struct { - struct nvif_push push; - } chan; + struct nvif_chan chan; struct nouveau_cli *cli; struct nouveau_vmm *vmm; @@ -41,12 +39,7 @@ struct nouveau_channel { int free; int cur; int put; - int ib_base; - int ib_max; - int ib_free; - int ib_put; } dma; - u32 user_get_hi; u32 user_get; u32 user_put; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index a1f329ef0641..017a803121d4 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -43,8 +43,6 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) uint64_t val; val = nvif_rd32(chan->userd, chan->user_get); - if (chan->user_get_hi) - val |= (uint64_t)nvif_rd32(chan->userd, chan->user_get_hi) << 32; /* reset counter as long as GET is still advancing, this is * to avoid misdetecting a GPU lockup if the GPU happens to @@ -68,111 +66,12 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) return (val - chan->push.addr) >> 2; } -void -nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length, - bool no_prefetch) -{ - struct nvif_user *user = &chan->cli->drm->client.device.user; - struct nouveau_bo *pb = chan->push.buffer; - int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; - - BUG_ON(chan->dma.ib_free < 1); - WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH); - - nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); - nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 | - (no_prefetch ? (1 << 31) : 0)); - - chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; - - mb(); - /* Flush writes. */ - nouveau_bo_rd32(pb, 0); - - nvif_wr32(chan->userd, 0x8c, chan->dma.ib_put); - if (user->func && user->func->doorbell) - user->func->doorbell(user, chan->token); - chan->dma.ib_free--; -} - -static int -nv50_dma_push_wait(struct nouveau_channel *chan, int count) -{ - uint32_t cnt = 0, prev_get = 0; - - while (chan->dma.ib_free < count) { - uint32_t get = nvif_rd32(chan->userd, 0x88); - if (get != prev_get) { - prev_get = get; - cnt = 0; - } - - if ((++cnt & 0xff) == 0) { - udelay(1); - if (cnt > 100000) - return -EBUSY; - } - - chan->dma.ib_free = get - chan->dma.ib_put; - if (chan->dma.ib_free <= 0) - chan->dma.ib_free += chan->dma.ib_max; - } - - return 0; -} - -static int -nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) -{ - uint64_t prev_get = 0; - int ret, cnt = 0; - - ret = nv50_dma_push_wait(chan, slots + 1); - if (unlikely(ret)) - return ret; - - while (chan->dma.free < count) { - int get = READ_GET(chan, &prev_get, &cnt); - if (unlikely(get < 0)) { - if (get == -EINVAL) - continue; - - return get; - } - - if (get <= chan->dma.cur) { - chan->dma.free = chan->dma.max - chan->dma.cur; - if (chan->dma.free >= count) - break; - - FIRE_RING(chan); - do { - get = READ_GET(chan, &prev_get, &cnt); - if (unlikely(get < 0)) { - if (get == -EINVAL) - continue; - return get; - } - } while (get == 0); - chan->dma.cur = 0; - chan->dma.put = 0; - } - - chan->dma.free = get - chan->dma.cur - 1; - } - - return 0; -} - int -nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) +nouveau_dma_wait(struct nouveau_channel *chan, int size) { uint64_t prev_get = 0; int cnt = 0, get; - if (chan->dma.ib_max) - return nv50_dma_wait(chan, slots, size); - while (chan->dma.free < size) { get = READ_GET(chan, &prev_get, &cnt); if (unlikely(get == -EBUSY)) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index c52cda82353e..0e27b76d1e1c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -30,9 +30,7 @@ #include "nouveau_bo.h" #include "nouveau_chan.h" -int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); -void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length, - bool no_prefetch); +int nouveau_dma_wait(struct nouveau_channel *, int size); /* * There's a hw race condition where you can't jump to your PUT offset, @@ 
-67,7 +65,7 @@ RING_SPACE(struct nouveau_channel *chan, int size) { int ret; - ret = nouveau_dma_wait(chan, 1, size); + ret = nouveau_dma_wait(chan, size); if (ret) return ret; @@ -94,12 +92,7 @@ FIRE_RING(struct nouveau_channel *chan) return; chan->accel_done = true; - if (chan->dma.ib_max) { - nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2), - (chan->dma.cur - chan->dma.put) << 2, false); - } else { - WRITE_PUT(chan->dma.cur); - } + WRITE_PUT(chan->dma.cur); chan->dma.put = chan->dma.cur; } diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index a0b5f1b16e8b..eac7cf8940a3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -10,6 +10,8 @@ #include "nouveau_sched.h" #include "nouveau_uvmm.h" +#include + /** * DOC: Overview * @@ -131,7 +133,7 @@ nouveau_exec_job_run(struct nouveau_job *job) struct nouveau_fence *fence = exec_job->fence; int i, ret; - ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16); + ret = nvif_chan_gpfifo_wait(&chan->chan, exec_job->push.count + 1, 16); if (ret) { NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret); return ERR_PTR(ret); @@ -141,7 +143,7 @@ nouveau_exec_job_run(struct nouveau_job *job) struct drm_nouveau_exec_push *p = &exec_job->push.s[i]; bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH; - nv50_dma_push(chan, p->va, p->va_len, no_prefetch); + nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch); } ret = nouveau_fence_emit(fence); @@ -375,10 +377,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (unlikely(atomic_read(&chan->killed))) return nouveau_abi16_put(abi16, -ENODEV); - if (!chan->dma.ib_max) + if (chan->user.oclass < NV50_CHANNEL_GPFIFO) return nouveau_abi16_put(abi16, -ENOSYS); - push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max); + push_max = nouveau_exec_push_max_from_ib_max(chan->chan.gpfifo.max); if (unlikely(req->push_count > push_max)) { NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n", req->push_count, push_max); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 67e3c99de73a..5877545c2c50 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -850,8 +850,8 @@ revalidate: } } - if (chan->dma.ib_max) { - ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); + if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) { + ret = nvif_chan_gpfifo_wait(&chan->chan, req->nr_push + 1, 16); if (ret) { NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret); goto out; @@ -864,7 +864,7 @@ revalidate: u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH; bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH; - nv50_dma_push(chan, addr, length, no_prefetch); + nvif_chan_gpfifo_push(&chan->chan, addr, length, no_prefetch); } } else if (drm->client.device.info.chipset >= 0x25) { @@ -958,7 +958,7 @@ out_prevalid: u_free(push); out_next: - if (chan->dma.ib_max) { + if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) { req->suffix0 = 0x00000000; req->suffix1 = 0x00000000; } else diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild index b7963a39dd91..991722951fbb 100644 --- a/drivers/gpu/drm/nouveau/nvif/Kbuild +++ b/drivers/gpu/drm/nouveau/nvif/Kbuild @@ -14,6 +14,11 @@ nvif-y += nvif/outp.o nvif-y += nvif/timer.o nvif-y += nvif/vmm.o +# Channel classes +nvif-y += nvif/chan.o +nvif-y += nvif/chan506f.o +nvif-y += nvif/chanc36f.o + # Usermode classes nvif-y += 
nvif/user.o nvif-y += nvif/userc361.o diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c new file mode 100644 index 000000000000..7f58a1c17979 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/chan.c @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include + +static void +nvif_chan_gpfifo_push_kick(struct nvif_push *push) +{ + struct nvif_chan *chan = container_of(push, typeof(*chan), push); + u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr; + u32 cnt = push->cur - push->bgn; + + chan->func->gpfifo.push(chan, true, chan->push.addr + (put << 2), cnt << 2, false); + chan->func->gpfifo.kick(chan); +} + +static int +nvif_chan_gpfifo_push_wait(struct nvif_push *push, u32 push_nr) +{ + struct nvif_chan *chan = container_of(push, typeof(*chan), push); + + return nvif_chan_gpfifo_wait(chan, 1, push_nr); +} + +void +nvif_chan_gpfifo_push(struct nvif_chan *chan, u64 addr, u32 size, bool no_prefetch) +{ + chan->func->gpfifo.push(chan, false, addr, size, no_prefetch); +} + +int +nvif_chan_gpfifo_wait(struct nvif_chan *chan, u32 gpfifo_nr, u32 push_nr) +{ + struct nvif_push *push = &chan->push; + int ret = 0, time = 1000000; + + /* Account for the GPFIFO entry needed to submit pushbuf. */ + if (push_nr) + gpfifo_nr++; + + /* Wait for space in main push buffer. */ + if (push->cur + push_nr > push->end) { + ret = nvif_chan_dma_wait(chan, push_nr); + if (ret) + return ret; + } + + /* Wait for GPFIFO space. */ + while (chan->gpfifo.free < gpfifo_nr) { + chan->gpfifo.free = chan->func->gpfifo.read_get(chan) - chan->gpfifo.cur - 1; + if (chan->gpfifo.free < 0) + chan->gpfifo.free += chan->gpfifo.max + 1; + + if (chan->gpfifo.free < gpfifo_nr) { + if (!time--) + return -ETIMEDOUT; + udelay(1); + } + } + + return 0; +} + +void +nvif_chan_gpfifo_ctor(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, struct nvif_chan *chan) +{ + chan->func = func; + + chan->userd.map.ptr = userd; + + chan->gpfifo.map.ptr = gpfifo; + chan->gpfifo.max = (gpfifo_size >> 3) - 1; + chan->gpfifo.free = chan->gpfifo.max; + + chan->push.mem.object.map.ptr = push; + chan->push.wait = nvif_chan_gpfifo_push_wait; + chan->push.kick = nvif_chan_gpfifo_push_kick; + chan->push.addr = push_addr; + chan->push.hw.max = push_size >> 2; + chan->push.bgn = chan->push.cur = chan->push.end = push; +} + +int +nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr) +{ + struct nvif_push *push = &chan->push; + u32 cur = push->cur - (u32 *)push->mem.object.map.ptr; + u32 free, time = 1000000; + + do { + u32 get = chan->func->push.read_get(chan); + + if (get <= cur) { + free = push->hw.max - cur; + if (free >= nr) + break; + + PUSH_KICK(push); + + while (get == 0) { + get = chan->func->push.read_get(chan); + if (get == 0) { + if (!time--) + return -ETIMEDOUT; + udelay(1); + } + } + + cur = 0; + } + + free = get - cur - 1; + + if (free < nr) { + if (!time--) + return -ETIMEDOUT; + udelay(1); + } + } while (free < nr); + + push->bgn = (u32 *)push->mem.object.map.ptr + cur; + push->cur = push->bgn; + push->end = push->bgn + free; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvif/chan506f.c b/drivers/gpu/drm/nouveau/nvif/chan506f.c new file mode 100644 index 000000000000..5a5f8ddc058f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/chan506f.c @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include + +static void +nvif_chan506f_gpfifo_kick(struct nvif_chan *chan) +{ + wmb(); + nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur); +} + +void +nvif_chan506f_gpfifo_push(struct nvif_chan *chan, bool main, u64 addr, u32 size, bool no_prefetch) +{ + u32 gpptr = chan->gpfifo.cur << 3; + + if (WARN_ON(!chan->gpfifo.free)) + return; + + nvif_wr32(&chan->gpfifo, gpptr + 0, lower_32_bits(addr)); + nvif_wr32(&chan->gpfifo, gpptr + 4, upper_32_bits(addr) | + (main ? 0 : BIT(9)) | + (size >> 2) << 10 | + (no_prefetch ? BIT(31) : 0)); + + chan->gpfifo.cur = (chan->gpfifo.cur + 1) & chan->gpfifo.max; + chan->gpfifo.free--; + if (!chan->gpfifo.free) + chan->push.end = chan->push.cur; +} + +u32 +nvif_chan506f_gpfifo_read_get(struct nvif_chan *chan) +{ + return nvif_rd32(&chan->userd, 0x88); +} + +u32 +nvif_chan506f_read_get(struct nvif_chan *chan) +{ + u32 tlgetlo = nvif_rd32(&chan->userd, 0x58); + u32 tlgethi = nvif_rd32(&chan->userd, 0x5c); + struct nvif_push *push = &chan->push; + + /* Update cached GET pointer if TOP_LEVEL_GET is valid. */ + if (tlgethi & BIT(31)) { + u64 tlget = ((u64)(tlgethi & 0xff) << 32) | tlgetlo; + + push->hw.get = (tlget - push->addr) >> 2; + } + + return push->hw.get; +} + +static const struct nvif_chan_func +nvif_chan506f = { + .push.read_get = nvif_chan506f_read_get, + .gpfifo.read_get = nvif_chan506f_gpfifo_read_get, + .gpfifo.push = nvif_chan506f_gpfifo_push, + .gpfifo.kick = nvif_chan506f_gpfifo_kick, +}; + +int +nvif_chan506f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size) +{ + nvif_chan_gpfifo_ctor(&nvif_chan506f, userd, gpfifo, gpfifo_size, + push, push_addr, push_size, chan); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvif/chanc36f.c b/drivers/gpu/drm/nouveau/nvif/chanc36f.c new file mode 100644 index 000000000000..28a4207a4390 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/chanc36f.c @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include +#include + +static void +nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan) +{ + struct nvif_user *usermode = chan->usermode; + + nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur); + + wmb(); /* ensure CPU writes are flushed to BAR1 */ + nvif_rd32(&chan->userd, 0); /* ensure BAR1 writes are flushed to vidmem */ + + usermode->func->doorbell(usermode, chan->doorbell_token); +} + +static const struct nvif_chan_func +nvif_chanc36f = { + .push.read_get = nvif_chan506f_read_get, + .gpfifo.read_get = nvif_chan506f_gpfifo_read_get, + .gpfifo.push = nvif_chan506f_gpfifo_push, + .gpfifo.kick = nvif_chanc36f_gpfifo_kick, +}; + +int +nvif_chanc36f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, + struct nvif_user *usermode, u32 doorbell_token) +{ + nvif_chan_gpfifo_ctor(&nvif_chanc36f, userd, gpfifo, gpfifo_size, + push, push_addr, push_size, chan); + chan->usermode = usermode; + chan->doorbell_token = doorbell_token; + return 0; +} -- cgit v1.2.3 From 862450a85b85a3e88e9065c9d2421f3ef17d054c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 19 Jun 2024 14:23:04 +1000 Subject: drm/nouveau/gf100-: track chan progress with non-WFI semaphore release From VOLTA_CHANNEL_GPFIFO_A onwards, HW no longer updates the GET/GP_GET pointers in USERD following channel progress, but instead updates on a timer for compatibility, and SW is expected to implement its own method of tracking channel progress (typically via non-WFI semaphore release). Nouveau has been making use of the compatibility mode up until now, however, from BLACKWELL_CHANNEL_GPFIFO_A HW no longer supports USERD writeback at all. Allocate a per-channel buffer in system memory, and append a non-WFI semaphore release to the end of each push buffer segment to simulate the pointers previously read from USERD. This change is implemented for Fermi (which is the first to support non- WFI semaphore release) onwards, as readback from system memory is likely faster than BAR1 reads. 
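To sketch the tracking scheme itself: the non-WFI semaphore release appended to each segment writes a single 32-bit payload packing both progress pointers, and the CPU recovers the values that USERD writeback used to provide by reading the semaphore buffer. The helpers below are illustrative only; the real encoding is the NVIF_CHAN906F_*PTR* definitions in the new nvif/chan906f.c in this patch.

    /* Sketch: low 15 bits carry the push buffer (GET) pointer, the upper bits
     * the GPFIFO (GP_GET) pointer, matching NVIF_CHAN906F_PBPTR_BITS == 15. */
    #define PBPTR_BITS 15

    static u32
    sema_payload(u32 gpptr, u32 pbptr)      /* value released by the GPU */
    {
            return (gpptr << PBPTR_BITS) | pbptr;
    }

    static u32
    sim_gp_get(const u32 *sema)             /* replaces USERD GP_GET readback */
    {
            return READ_ONCE(*sema) >> PBPTR_BITS;
    }

    static u32
    sim_pb_get(const u32 *sema)             /* replaces USERD GET readback */
    {
            return READ_ONCE(*sema) & ((1 << PBPTR_BITS) - 1);
    }
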
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/include/nvif/chan.h | 30 ++++++++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 22 +++++++ drivers/gpu/drm/nouveau/nouveau_bo.h | 2 + drivers/gpu/drm/nouveau/nouveau_chan.c | 19 +++++- drivers/gpu/drm/nouveau/nouveau_chan.h | 5 ++ drivers/gpu/drm/nouveau/nouveau_exec.c | 2 + drivers/gpu/drm/nouveau/nouveau_gem.c | 2 + drivers/gpu/drm/nouveau/nvif/Kbuild | 1 + drivers/gpu/drm/nouveau/nvif/chan.c | 33 +++++++++- drivers/gpu/drm/nouveau/nvif/chan506f.c | 6 +- drivers/gpu/drm/nouveau/nvif/chan906f.c | 93 +++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nvif/chanc36f.c | 48 +++++++++++++-- 12 files changed, 245 insertions(+), 18 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvif/chan906f.c diff --git a/drivers/gpu/drm/nouveau/include/nvif/chan.h b/drivers/gpu/drm/nouveau/include/nvif/chan.h index c1f7a8fce05b..c329a29068d5 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/chan.h +++ b/drivers/gpu/drm/nouveau/include/nvif/chan.h @@ -17,7 +17,13 @@ struct nvif_chan { void (*push)(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch); void (*kick)(struct nvif_chan *); + int (*post)(struct nvif_chan *, u32 gpptr, u32 pbptr); + u32 post_size; } gpfifo; + + struct { + int (*release)(struct nvif_chan *, u64 addr, u32 data); + } sem; } *func; struct { @@ -31,6 +37,11 @@ struct nvif_chan { int free; } gpfifo; + struct { + struct nvif_map map; + u64 addr; + } sema; + struct nvif_push push; struct nvif_user *usermode; @@ -43,14 +54,23 @@ void nvif_chan_gpfifo_ctor(const struct nvif_chan_func *, void *userd, void *gpf void *push, u64 push_addr, u32 push_size, struct nvif_chan *); int nvif_chan_gpfifo_wait(struct nvif_chan *, u32 gpfifo_nr, u32 push_nr); void nvif_chan_gpfifo_push(struct nvif_chan *, u64 addr, u32 size, bool no_prefetch); +int nvif_chan_gpfifo_post(struct nvif_chan *); -int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size, - void *push, u64 push_addr, u32 push_size); -u32 nvif_chan506f_read_get(struct nvif_chan *); -u32 nvif_chan506f_gpfifo_read_get(struct nvif_chan *); void nvif_chan506f_gpfifo_push(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch); +void nvif_chan506f_gpfifo_kick(struct nvif_chan *); + +int nvif_chan906f_ctor_(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr, + struct nvif_chan *); +u32 nvif_chan906f_read_get(struct nvif_chan *); +u32 nvif_chan906f_gpfifo_read_get(struct nvif_chan *); +int nvif_chan906f_gpfifo_post(struct nvif_chan *, u32 gpptr, u32 pbptr); +int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size); +int nvif_chan906f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr); int nvif_chanc36f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size, - void *push, u64 push_addr, u32 push_size, + void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr, struct nvif_user *usermode, u32 doorbell_token); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 3a5ddf60380e..a32a50f41a43 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -456,6 +456,28 @@ 
nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau return 0; } +int +nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size, + struct nouveau_bo **pnvbo, struct nouveau_vma **pvma) +{ + struct nouveau_vmm *vmm = nouveau_cli_vmm(cli); + struct nouveau_bo *nvbo; + int ret; + + ret = nouveau_bo_new_map(cli, domain, size, &nvbo); + if (ret) + return ret; + + ret = nouveau_vma_new(nvbo, vmm, pvma); + if (ret) { + nouveau_bo_unpin_del(&nvbo); + return ret; + } + + *pnvbo = nvbo; + return 0; +} + static void set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index f402f14bebb0..d59fd12268b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -92,6 +92,8 @@ void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo); int nouveau_bo_new_pin(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **); int nouveau_bo_new_map(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **); +int nouveau_bo_new_map_gpu(struct nouveau_cli *, u32 domain, u32 size, + struct nouveau_bo **, struct nouveau_vma **); void nouveau_bo_unpin_del(struct nouveau_bo **); /* TODO: submit equivalent to TTM generic API upstream? */ diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index ad1e99184f7a..2a775d908e24 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -103,6 +103,8 @@ nouveau_channel_del(struct nouveau_channel **pchan) nvif_event_dtor(&chan->kill); nvif_object_dtor(&chan->user); nvif_mem_dtor(&chan->mem_userd); + nouveau_vma_del(&chan->sema.vma); + nouveau_bo_unpin_del(&chan->sema.bo); nvif_object_dtor(&chan->push.ctxdma); nouveau_vma_del(&chan->push.vma); nouveau_bo_unpin_del(&chan->push.buffer); @@ -189,8 +191,10 @@ nouveau_channel_prep(struct nouveau_cli *cli, chan->push.addr = chan->push.vma->addr; - if (device->info.family >= NV_DEVICE_INFO_V0_FERMI) - return 0; + if (device->info.family >= NV_DEVICE_INFO_V0_FERMI) { + return nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE, + &chan->sema.bo, &chan->sema.vma); + } args.target = NV_DMA_V0_TARGET_VM; args.access = NV_DMA_V0_ACCESS_VM; @@ -429,16 +433,25 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) chan->user_get = 0x44; chan->dma.max = (0x10000 / 4) - 2; } else - if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) { + if (chan->user.oclass < FERMI_CHANNEL_GPFIFO) { ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr, (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000, chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000); if (ret) return ret; + } else + if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) { + ret = nvif_chan906f_ctor(&chan->chan, chan->userd->map.ptr, + (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000, + chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000, + chan->sema.bo->kmap.virtual, chan->sema.vma->addr); + if (ret) + return ret; } else { ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr, (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000, chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000, + chan->sema.bo->kmap.virtual, chan->sema.vma->addr, &drm->client.device.user, chan->token); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index ea8c3cdab46f..561877725aac 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h @@ -43,6 +43,11 @@ struct nouveau_channel { u32 user_get; u32 user_put; + struct { + struct nouveau_bo *bo; + struct nouveau_vma *vma; + } sema; + struct nvif_object user; struct nvif_object blit; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index eac7cf8940a3..41b7c608c905 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -146,6 +146,8 @@ nouveau_exec_job_run(struct nouveau_job *job) nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch); } + nvif_chan_gpfifo_post(&chan->chan); + ret = nouveau_fence_emit(fence); if (ret) { nouveau_fence_unref(&exec_job->fence); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 5877545c2c50..690e10fbf0bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -866,6 +866,8 @@ revalidate: nvif_chan_gpfifo_push(&chan->chan, addr, length, no_prefetch); } + + nvif_chan_gpfifo_post(&chan->chan); } else if (drm->client.device.info.chipset >= 0x25) { ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2); diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild index 991722951fbb..198889c20ce1 100644 --- a/drivers/gpu/drm/nouveau/nvif/Kbuild +++ b/drivers/gpu/drm/nouveau/nvif/Kbuild @@ -17,6 +17,7 @@ nvif-y += nvif/vmm.o # Channel classes nvif-y += nvif/chan.o nvif-y += nvif/chan506f.o +nvif-y += nvif/chan906f.o nvif-y += nvif/chanc36f.o # Usermode classes diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c index 7f58a1c17979..baa10227d51a 100644 --- a/drivers/gpu/drm/nouveau/nvif/chan.c +++ b/drivers/gpu/drm/nouveau/nvif/chan.c @@ -9,7 +9,16 @@ nvif_chan_gpfifo_push_kick(struct nvif_push *push) { struct nvif_chan *chan = container_of(push, typeof(*chan), push); u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr; - u32 cnt = push->cur - push->bgn; + u32 cnt; + + if (chan->func->gpfifo.post) { + if (push->end - push->cur < chan->func->gpfifo.post_size) + push->end = push->cur + chan->func->gpfifo.post_size; + + WARN_ON(nvif_chan_gpfifo_post(chan)); + } + + cnt = push->cur - push->bgn; chan->func->gpfifo.push(chan, true, chan->push.addr + (put << 2), cnt << 2, false); chan->func->gpfifo.kick(chan); @@ -23,6 +32,16 @@ nvif_chan_gpfifo_push_wait(struct nvif_push *push, u32 push_nr) return nvif_chan_gpfifo_wait(chan, 1, push_nr); } +int +nvif_chan_gpfifo_post(struct nvif_chan *chan) +{ + const u32 *map = chan->push.mem.object.map.ptr; + const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size; + const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max; + + return chan->func->gpfifo.post(chan, gpptr, pbptr); +} + void nvif_chan_gpfifo_push(struct nvif_chan *chan, u64 addr, u32 size, bool no_prefetch) { @@ -35,6 +54,14 @@ nvif_chan_gpfifo_wait(struct nvif_chan *chan, u32 gpfifo_nr, u32 push_nr) struct nvif_push *push = &chan->push; int ret = 0, time = 1000000; + if (gpfifo_nr) { + /* Account for pushbuf space needed by nvif_chan_gpfifo_post(), + * if used after pushing userspace GPFIFO entries. + */ + if (chan->func->gpfifo.post) + push_nr += chan->func->gpfifo.post_size; + } + /* Account for the GPFIFO entry needed to submit pushbuf. 
*/ if (push_nr) gpfifo_nr++; @@ -89,6 +116,8 @@ nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr) u32 cur = push->cur - (u32 *)push->mem.object.map.ptr; u32 free, time = 1000000; + nr += chan->func->gpfifo.post_size; + do { u32 get = chan->func->push.read_get(chan); @@ -122,6 +151,6 @@ nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr) push->bgn = (u32 *)push->mem.object.map.ptr + cur; push->cur = push->bgn; - push->end = push->bgn + free; + push->end = push->bgn + free - chan->func->gpfifo.post_size; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvif/chan506f.c b/drivers/gpu/drm/nouveau/nvif/chan506f.c index 5a5f8ddc058f..d3900887c4a7 100644 --- a/drivers/gpu/drm/nouveau/nvif/chan506f.c +++ b/drivers/gpu/drm/nouveau/nvif/chan506f.c @@ -4,7 +4,7 @@ */ #include -static void +void nvif_chan506f_gpfifo_kick(struct nvif_chan *chan) { wmb(); @@ -31,13 +31,13 @@ nvif_chan506f_gpfifo_push(struct nvif_chan *chan, bool main, u64 addr, u32 size, chan->push.end = chan->push.cur; } -u32 +static u32 nvif_chan506f_gpfifo_read_get(struct nvif_chan *chan) { return nvif_rd32(&chan->userd, 0x88); } -u32 +static u32 nvif_chan506f_read_get(struct nvif_chan *chan) { u32 tlgetlo = nvif_rd32(&chan->userd, 0x58); diff --git a/drivers/gpu/drm/nouveau/nvif/chan906f.c b/drivers/gpu/drm/nouveau/nvif/chan906f.c new file mode 100644 index 000000000000..c9cfb85179b0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/chan906f.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include +#include +#include + +#include + +/* Limits GPFIFO size to 1MiB, and "main" push buffer size to 64KiB. */ +#define NVIF_CHAN906F_PBPTR_BITS 15 +#define NVIF_CHAN906F_PBPTR_MASK ((1 << NVIF_CHAN906F_PBPTR_BITS) - 1) + +#define NVIF_CHAN906F_GPPTR_SHIFT NVIF_CHAN906F_PBPTR_BITS +#define NVIF_CHAN906F_GPPTR_BITS (32 - NVIF_CHAN906F_PBPTR_BITS) +#define NVIF_CHAN906F_GPPTR_MASK ((1 << NVIF_CHAN906F_GPPTR_BITS) - 1) + +#define NVIF_CHAN906F_SEM_RELEASE_SIZE 5 + +static int +nvif_chan906f_sem_release(struct nvif_chan *chan, u64 addr, u32 data) +{ + struct nvif_push *push = &chan->push; + int ret; + + ret = PUSH_WAIT(push, NVIF_CHAN906F_SEM_RELEASE_SIZE); + if (ret) + return ret; + + PUSH_MTHD(push, NV906F, SEMAPHOREA, + NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(addr)), + + SEMAPHOREB, lower_32_bits(addr), + + SEMAPHOREC, data, + + SEMAPHORED, + NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) | + NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, DIS) | + NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE)); + + return 0; +} + +int +nvif_chan906f_gpfifo_post(struct nvif_chan *chan, u32 gpptr, u32 pbptr) +{ + return chan->func->sem.release(chan, chan->sema.addr, + (gpptr << NVIF_CHAN906F_GPPTR_SHIFT) | pbptr); +} + +u32 +nvif_chan906f_gpfifo_read_get(struct nvif_chan *chan) +{ + return nvif_rd32(&chan->sema, 0) >> NVIF_CHAN906F_GPPTR_SHIFT; +} + +u32 +nvif_chan906f_read_get(struct nvif_chan *chan) +{ + return nvif_rd32(&chan->sema, 0) & NVIF_CHAN906F_PBPTR_MASK; +} + +static const struct nvif_chan_func +nvif_chan906f = { + .push.read_get = nvif_chan906f_read_get, + .gpfifo.read_get = nvif_chan906f_gpfifo_read_get, + .gpfifo.push = nvif_chan506f_gpfifo_push, + .gpfifo.kick = nvif_chan506f_gpfifo_kick, + .gpfifo.post = nvif_chan906f_gpfifo_post, + .gpfifo.post_size = NVIF_CHAN906F_SEM_RELEASE_SIZE, + .sem.release = nvif_chan906f_sem_release, +}; + +int +nvif_chan906f_ctor_(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, 
u32 push_size, void *sema, u64 sema_addr, + struct nvif_chan *chan) +{ + nvif_chan_gpfifo_ctor(func, userd, gpfifo, gpfifo_size, push, push_addr, push_size, chan); + chan->sema.map.ptr = sema; + chan->sema.addr = sema_addr; + return 0; +} + +int +nvif_chan906f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size, + void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr) +{ + return nvif_chan906f_ctor_(&nvif_chan906f, userd, gpfifo, gpfifo_size, + push, push_addr, push_size, sema, sema_addr, chan); +} diff --git a/drivers/gpu/drm/nouveau/nvif/chanc36f.c b/drivers/gpu/drm/nouveau/nvif/chanc36f.c index 28a4207a4390..ca02b939c3fd 100644 --- a/drivers/gpu/drm/nouveau/nvif/chanc36f.c +++ b/drivers/gpu/drm/nouveau/nvif/chanc36f.c @@ -5,6 +5,9 @@ #include #include +#include +#include + static void nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan) { @@ -18,21 +21,56 @@ nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan) usermode->func->doorbell(usermode, chan->doorbell_token); } +#define NVIF_CHANC36F_SEM_RELEASE_SIZE 6 + +static int +nvif_chanc36f_sem_release(struct nvif_chan *chan, u64 addr, u32 data) +{ + struct nvif_push *push = &chan->push; + int ret; + + ret = PUSH_WAIT(push, NVIF_CHANC36F_SEM_RELEASE_SIZE); + if (ret) + return ret; + + PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(addr), + + SEM_ADDR_HI, upper_32_bits(addr), + + SEM_PAYLOAD_LO, data); + + PUSH_MTHD(push, NVC36F, SEM_EXECUTE, + NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) | + NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, DIS) | + NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) | + NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS)); + + return 0; +} + static const struct nvif_chan_func nvif_chanc36f = { - .push.read_get = nvif_chan506f_read_get, - .gpfifo.read_get = nvif_chan506f_gpfifo_read_get, + .push.read_get = nvif_chan906f_read_get, + .gpfifo.read_get = nvif_chan906f_gpfifo_read_get, .gpfifo.push = nvif_chan506f_gpfifo_push, .gpfifo.kick = nvif_chanc36f_gpfifo_kick, + .gpfifo.post = nvif_chan906f_gpfifo_post, + .gpfifo.post_size = NVIF_CHANC36F_SEM_RELEASE_SIZE, + .sem.release = nvif_chanc36f_sem_release, }; int nvif_chanc36f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size, - void *push, u64 push_addr, u32 push_size, + void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr, struct nvif_user *usermode, u32 doorbell_token) { - nvif_chan_gpfifo_ctor(&nvif_chanc36f, userd, gpfifo, gpfifo_size, - push, push_addr, push_size, chan); + int ret; + + ret = nvif_chan906f_ctor_(&nvif_chanc36f, userd, gpfifo, gpfifo_size, + push, push_addr, push_size, sema, sema_addr, chan); + if (ret) + return ret; + chan->usermode = usermode; chan->doorbell_token = doorbell_token; return 0; -- cgit v1.2.3 From 32cb1cc358ffed248f7762ba4c1d0625a2bfddaa Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Nov 2024 10:27:02 +1000 Subject: drm/nouveau: add support for GB10x This commit enables basic support for the GB100/GB102 Blackwell GPUs. Beyond HW class ID plumbing there's very little change here vs GH100. 
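For reference, the class ID plumbing works because callers already probe an ordered class list and take the first one the device exposes, so new hardware only needs its classes prepended. A minimal sketch of that pattern follows, assuming the existing nvif_mclass() semantics (index of the first supported entry, negative on error) and a `device` pointer of type struct nvif_device *; the full list lives in nouveau_channel_ctor() below.

    static const struct nvif_mclass hosts[] = {
            { BLACKWELL_CHANNEL_GPFIFO_A, 0 },      /* preferred on GB10x */
            { HOPPER_CHANNEL_GPFIFO_A, 0 },
            /* ...older classes, as in nouveau_channel_ctor()... */
            {}
    };
    int cid = nvif_mclass(&device->object, hosts);

    if (cid < 0)
            return cid;                     /* no usable channel class */
    /* hosts[cid].oclass is then used for channel allocation. */
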
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../include/nvhw/ref/gb100/dev_hshub_base.h | 28 +++++++++++++++++ drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 1 + drivers/gpu/drm/nouveau/include/nvif/class.h | 9 ++++++ drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 1 + drivers/gpu/drm/nouveau/nouveau_bo.c | 1 + drivers/gpu/drm/nouveau/nouveau_chan.c | 33 ++++++++++---------- drivers/gpu/drm/nouveau/nouveau_drm.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 33 ++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c | 34 +++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c | 24 +++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c | 35 ++++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c | 17 +++++++---- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 6 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c | 27 +++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 1 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c | 16 ++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 2 ++ 27 files changed, 257 insertions(+), 23 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h new file mode 100644 index 000000000000..c9d74bd95e0b --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __gb100_dev_hshub_base_h__ +#define __gb100_dev_hshub_base_h__ + +#define NV_PFB_HSHUB0 0x00870fff:0x00870000 + +#define NV_PFB_HSHUB 0x00000FFF:0x00000000 /* RW--D */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00000E50 /* RW-4R */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00000E54 /* RW-4R */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO 0x000006C0 /* RW-4R */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI 0x000006C4 /* RW-4R */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */ + +#endif // __gb100_dev_hshub_base_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 60a52ef52071..ea8267e0d8da 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -30,6 +30,7 @@ struct nv_device_info_v0 { #define NV_DEVICE_INFO_V0_AMPERE 0x0d #define NV_DEVICE_INFO_V0_ADA 0x0e #define NV_DEVICE_INFO_V0_HOPPER 0x0f +#define NV_DEVICE_INFO_V0_BLACKWELL 0x10 __u8 family; __u8 pad06[2]; __u64 ram_size; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 83acf367a65c..606483fc850b 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -57,6 +57,7 @@ #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 #define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 +#define BLACKWELL_INLINE_TO_MEMORY_A 0x0000cd40 #define NV04_DISP /* cl0046.h */ 0x00000046 @@ -87,6 +88,7 @@ #define AMPERE_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c56f #define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f #define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f +#define BLACKWELL_CHANNEL_GPFIFO_A 0x0000c96f #define NV50_DISP /* if0010.h */ 0x00005070 #define G82_DISP /* if0010.h */ 0x00008270 @@ -198,6 +200,8 @@ #define HOPPER_A 0x0000cb97 +#define BLACKWELL_A 0x0000cd97 + #define NV74_BSP 0x000074b0 #define NVB8B0_VIDEO_DECODER 0x0000b8b0 @@ -205,6 +209,7 @@ #define NVC6B0_VIDEO_DECODER 0x0000c6b0 #define NVC7B0_VIDEO_DECODER 0x0000c7b0 #define NVC9B0_VIDEO_DECODER 0x0000c9b0 +#define NVCDB0_VIDEO_DECODER 0x0000cdb0 #define GT212_MSVLD 0x000085b1 #define IGT21A_MSVLD 0x000086b1 @@ -234,6 +239,7 @@ #define AMPERE_DMA_COPY_A 0x0000c6b5 #define AMPERE_DMA_COPY_B 0x0000c7b5 #define HOPPER_DMA_COPY_A 0x0000c8b5 +#define BLACKWELL_DMA_COPY_A 0x0000c9b5 #define NVC4B7_VIDEO_ENCODER 0x0000c4b7 #define NVC7B7_VIDEO_ENCODER 0x0000c7b7 @@ -257,15 +263,18 @@ #define AMPERE_COMPUTE_B 0x0000c7c0 #define ADA_COMPUTE_A 0x0000c9c0 #define HOPPER_COMPUTE_A 0x0000cbc0 +#define BLACKWELL_COMPUTE_A 0x0000cdc0 #define NV74_CIPHER 0x000074c1 #define 
NVB8D1_VIDEO_NVJPG 0x0000b8d1 #define NVC4D1_VIDEO_NVJPG 0x0000c4d1 #define NVC9D1_VIDEO_NVJPG 0x0000c9d1 +#define NVCDD1_VIDEO_NVJPG 0x0000cdd1 #define NVB8FA_VIDEO_OFA 0x0000b8fa #define NVC6FA_VIDEO_OFA 0x0000c6fa #define NVC7FA_VIDEO_OFA 0x0000c7fa #define NVC9FA_VIDEO_OFA 0x0000c9fa +#define NVCDFA_VIDEO_OFA 0x0000cdfa #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 926542350abc..0664195e5684 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -48,6 +48,7 @@ struct nvkm_device { GA100 = 0x170, GH100 = 0x180, AD100 = 0x190, + GB10x = 0x1a0, } card_type; u32 chipset; u8 chiprev; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index c114903ce388..7bd73f9f749b 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -103,6 +103,7 @@ int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); int gh100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gb100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); #include #include diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h index 2a8c1d5a65f9..7122f814e4d0 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h @@ -19,4 +19,5 @@ int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); +int gb100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 8f611b2503b7..c5982fd74725 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -492,4 +492,5 @@ int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_ int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); +int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a32a50f41a43..1a9a74c26e8e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1000,6 +1000,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { + { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init }, { "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init }, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c 
b/drivers/gpu/drm/nouveau/nouveau_chan.c index 2a775d908e24..726de0aa0fcf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -249,22 +249,23 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, struct nouveau_channel **pchan) { const struct nvif_mclass hosts[] = { - { HOPPER_CHANNEL_GPFIFO_A, 0 }, - { AMPERE_CHANNEL_GPFIFO_B, 0 }, - { AMPERE_CHANNEL_GPFIFO_A, 0 }, - { TURING_CHANNEL_GPFIFO_A, 0 }, - { VOLTA_CHANNEL_GPFIFO_A, 0 }, - { PASCAL_CHANNEL_GPFIFO_A, 0 }, - { MAXWELL_CHANNEL_GPFIFO_A, 0 }, - { KEPLER_CHANNEL_GPFIFO_B, 0 }, - { KEPLER_CHANNEL_GPFIFO_A, 0 }, - { FERMI_CHANNEL_GPFIFO , 0 }, - { G82_CHANNEL_GPFIFO , 0 }, - { NV50_CHANNEL_GPFIFO , 0 }, - { NV40_CHANNEL_DMA , 0 }, - { NV17_CHANNEL_DMA , 0 }, - { NV10_CHANNEL_DMA , 0 }, - { NV03_CHANNEL_DMA , 0 }, + { BLACKWELL_CHANNEL_GPFIFO_A, 0 }, + { HOPPER_CHANNEL_GPFIFO_A, 0 }, + { AMPERE_CHANNEL_GPFIFO_B, 0 }, + { AMPERE_CHANNEL_GPFIFO_A, 0 }, + { TURING_CHANNEL_GPFIFO_A, 0 }, + { VOLTA_CHANNEL_GPFIFO_A, 0 }, + { PASCAL_CHANNEL_GPFIFO_A, 0 }, + { MAXWELL_CHANNEL_GPFIFO_A, 0 }, + { KEPLER_CHANNEL_GPFIFO_B, 0 }, + { KEPLER_CHANNEL_GPFIFO_A, 0 }, + { FERMI_CHANNEL_GPFIFO , 0 }, + { G82_CHANNEL_GPFIFO , 0 }, + { NV50_CHANNEL_GPFIFO , 0 }, + { NV40_CHANNEL_DMA , 0 }, + { NV17_CHANNEL_DMA , 0 }, + { NV10_CHANNEL_DMA , 0 }, + { NV03_CHANNEL_DMA , 0 }, {} }; DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 5b6bb4c2f78b..0be604af5b29 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -510,6 +510,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case AMPERE_CHANNEL_GPFIFO_A: case AMPERE_CHANNEL_GPFIFO_B: case HOPPER_CHANNEL_GPFIFO_A: + case BLACKWELL_CHANNEL_GPFIFO_A: ret = gv100_fence_create(drm); break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 5082fe5f1966..f6f23cbc7a73 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2802,6 +2802,36 @@ nv197_chipset = { .sec2 = { 0x00000001, ga102_sec2_new }, }; +static const struct nvkm_device_chip +nv1a0_chipset = { + .name = "GB100", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb100_fb_new }, + .fsp = { 0x00000001, gb100_fsp_new }, + .gsp = { 0x00000001, gb100_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip +nv1a2_chipset = { + .name = "GB102", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb100_fb_new }, + .fsp = { 0x00000001, gb100_fsp_new }, + .gsp = { 0x00000001, gb100_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + struct nvkm_subdev * nvkm_device_subdev(struct nvkm_device *device, int type, int inst) { @@ -3119,6 +3149,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x170: device->card_type = GA100; break; case 0x180: device->card_type = GH100; break; case 0x190: device->card_type = AD100; 
break; + case 0x1a0: device->card_type = GB10x; break; default: break; } @@ -3227,6 +3258,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x194: device->chip = &nv194_chipset; break; case 0x196: device->chip = &nv196_chipset; break; case 0x197: device->chip = &nv197_chipset; break; + case 0x1a0: device->chip = &nv1a0_chipset; break; + case 0x1a2: device->chip = &nv1a2_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { switch (device->chipset) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 57c2678022b5..07f45cc16210 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -149,6 +149,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break; case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break; case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break; + case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break; default: args->v0.family = 0; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index f13312934131..b438a57f2efc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -36,6 +36,7 @@ nvkm-y += nvkm/subdev/fb/tu102.o nvkm-y += nvkm/subdev/fb/ga100.o nvkm-y += nvkm/subdev/fb/ga102.o nvkm-y += nvkm/subdev/fb/gh100.o +nvkm-y += nvkm/subdev/fb/gb100.o nvkm-y += nvkm/subdev/fb/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c new file mode 100644 index 000000000000..1c78c8853617 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "priv.h" + +#include +#include + +static void +gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb) +{ + const u32 addr_hi = upper_32_bits(fb->sysmem.flush_page_addr); + const u32 addr_lo = lower_32_bits(fb->sysmem.flush_page_addr); + const u32 hshub = DRF_LO(NV_PFB_HSHUB0); + struct nvkm_device *device = fb->subdev.device; + + nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi); + nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo); +} + +static const struct nvkm_fb_func +gb100_fb = { + .sysmem.flush_page_init = gb100_fb_sysmem_flush_page_init, + .vidmem.size = ga102_fb_vidmem_size, +}; + +int +gb100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + return r535_fb_new(&gb100_fb, device, type, inst, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild index ff04992b181d..337772acdddc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild @@ -4,3 +4,4 @@ nvkm-y += nvkm/subdev/fsp/base.o nvkm-y += nvkm/subdev/fsp/gh100.o +nvkm-y += nvkm/subdev/fsp/gb100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c new file mode 100644 index 000000000000..e06636bf54b6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +static const struct nvkm_fsp_func +gb100_fsp = { + .wait_secure_boot = gh100_fsp_wait_secure_boot, + .cot = { + .version = 2, + .size_hash = 48, + .size_pkey = 97, + .size_sig = 96, + .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc, + }, +}; + +int +gb100_fsp_new(struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp) +{ + return nvkm_fsp_new_(&gb100_fsp, device, type, inst, pfsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c index 9f4285af3fed..2815be4bf5de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c @@ -237,7 +237,7 @@ gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg)); } -static int +int gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp) { struct nvkm_device *device = fsp->subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h index 91517f3dedfb..f0b2c605c33d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h @@ -23,6 +23,7 @@ struct nvkm_fsp_func { int nvkm_fsp_new_(const struct nvkm_fsp_func *, struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); +int gh100_fsp_wait_secure_boot(struct nvkm_fsp *); int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume, u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index 3c6c1309c4b4..4aebea4f6a64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -9,5 +9,6 @@ nvkm-y += nvkm/subdev/gsp/ga100.o nvkm-y += nvkm/subdev/gsp/ga102.o nvkm-y += nvkm/subdev/gsp/gh100.o nvkm-y += nvkm/subdev/gsp/ad102.o +nvkm-y += nvkm/subdev/gsp/gb100.o include $(src)/nvkm/subdev/gsp/rm/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c new file mode 100644 index 000000000000..12a3f2c1ed82 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +static const struct nvkm_gsp_func +gb100_gsp = { + .flcn = &ga102_gsp_flcn, + + .sig_section = ".fwsignature_gb10x", + + .dtor = r535_gsp_dtor, + .oneinit = gh100_gsp_oneinit, + .init = gh100_gsp_init, + .fini = gh100_gsp_fini, + + .rm.gpu = &gb10x_gpu, +}; + +static struct nvkm_gsp_fwif +gb100_gsps[] = { + { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true }, + {} +}; + +int +gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp); +} + +NVKM_GSP_FIRMWARE_FMC(gb100, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb102, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c index 3ad71696c111..ce31e8248807 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c @@ -16,7 +16,7 @@ #include #include -static int +int gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend) { struct nvkm_falcon *falcon = &gsp->falcon; @@ -65,7 +65,7 @@ gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0) return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN); } -static int +int gh100_gsp_init(struct nvkm_gsp *gsp) { struct nvkm_subdev *subdev = &gsp->subdev; @@ -74,6 +74,7 @@ gh100_gsp_init(struct nvkm_gsp *gsp) struct nvkm_gsp_mem *meta; GSP_FMC_BOOT_PARAMS *args; int ret, time = 4000; + u32 rsvd_size; u32 mbox0; if (!resume) { @@ -97,7 +98,11 @@ gh100_gsp_init(struct nvkm_gsp *gsp) args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM; args->gspRmParams.bootArgsOffset = gsp->libos.addr; - ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, gsp->fb.heap.size, resume, + rsvd_size = gsp->fb.heap.size; + if (gsp->rm->wpr->rsvd_size_pmu) + rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000); + + ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume, gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig); if (ret) return ret; @@ -157,7 +162,7 @@ gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp) meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp); meta->frtsSize = 0x100000; meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; - meta->pmuReservedSize = 0; + meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu; return 0; } @@ -254,7 +259,7 @@ elf_section(const void *elf, const char *name, unsigned int *len) return NULL; } -static int +int gh100_gsp_oneinit(struct nvkm_gsp *gsp) { struct nvkm_subdev *subdev = &gsp->subdev; @@ -319,7 +324,7 @@ gh100_gsp = { .rm.gpu = &gh100_gpu, }; -static int +int gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { int ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 86ec580ba936..4f14e85fc69e 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -26,6 +26,8 @@ int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *); +int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *); + #define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \ MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \ MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \ @@ -75,6 +77,10 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware struct nvkm_falcon *, struct nvkm_falcon_fw *); int ga102_gsp_reset(struct nvkm_gsp *); +int gh100_gsp_oneinit(struct nvkm_gsp *); +int gh100_gsp_init(struct nvkm_gsp *); +int gh100_gsp_fini(struct nvkm_gsp *, bool suspend); + void r535_gsp_dtor(struct nvkm_gsp *); int r535_gsp_oneinit(struct nvkm_gsp *); int r535_gsp_init(struct nvkm_gsp *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index 2a71868d6710..2efef4b694d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -12,6 +12,7 @@ nvkm-y += nvkm/subdev/gsp/rm/ga100.o nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o nvkm-y += nvkm/subdev/gsp/rm/ad10x.o nvkm-y += nvkm/subdev/gsp/rm/gh100.o +nvkm-y += nvkm/subdev/gsp/rm/gb10x.o include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c new file mode 100644 index 000000000000..3a296d8fd2e0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gpu.h" + +#include + +const struct nvkm_rm_gpu +gb10x_gpu = { + .usermode.class = HOPPER_USERMODE_A, + + .fifo.chan = { + .class = BLACKWELL_CHANNEL_GPFIFO_A, + }, + + .ce.class = BLACKWELL_DMA_COPY_A, + .gr.class = { + .i2m = BLACKWELL_INLINE_TO_MEMORY_A, + .twod = FERMI_TWOD_A, + .threed = BLACKWELL_A, + .compute = BLACKWELL_COMPUTE_A, + }, + .nvdec.class = NVCDB0_VIDEO_DECODER, + .nvjpg.class = NVCDD1_VIDEO_NVJPG, + .ofa.class = NVCDFA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index 443753f3369a..e84376c85e99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -63,4 +63,5 @@ extern const struct nvkm_rm_gpu ga100_gpu; extern const struct nvkm_rm_gpu ga1xx_gpu; extern const struct nvkm_rm_gpu ad10x_gpu; extern const struct nvkm_rm_gpu gh100_gpu; +extern const struct nvkm_rm_gpu gb10x_gpu; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c index 8a641e5a5b92..7e5b411fa543 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c @@ -29,6 +29,16 @@ r570_wpr_libos3_gh100 = { .offset_set_by_acr = true, }; +static const struct nvkm_rm_wpr +r570_wpr_libos3_gb10x = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + .heap_size_non_wpr = 0x200000, + .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000), + .offset_set_by_acr = true, +}; + static const struct nvkm_rm_api r570_api = { .gsp = &r570_gsp, @@ -65,3 +75,9 @@ r570_rm_gh100 = { .wpr = &r570_wpr_libos3_gh100, .api = &r570_api, }; + +const struct nvkm_rm_impl +r570_rm_gb10x = { + .wpr = &r570_wpr_libos3_gb10x, + .api = &r570_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 626ebce39be5..2e9bd74d39be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -26,6 +26,7 @@ struct nvkm_rm_wpr { u32 base_size; u64 heap_size_min; u32 heap_size_non_wpr; + u32 rsvd_size_pmu; bool offset_set_by_acr; }; @@ -176,6 +177,7 @@ extern const struct nvkm_rm_api_engine r535_ofa; extern const struct nvkm_rm_impl r570_rm_tu102; extern const struct nvkm_rm_impl r570_rm_ga102; extern const struct nvkm_rm_impl r570_rm_gh100; +extern const struct nvkm_rm_impl r570_rm_gb10x; extern const struct nvkm_rm_api_gsp r570_gsp; extern const struct nvkm_rm_api_client r570_client; extern const struct nvkm_rm_api_fbsr r570_fbsr; -- cgit v1.2.3 From 56c36f590a551eaf49bfffbba8702cffeaaa8280 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 26 Feb 2025 07:49:00 +1000 Subject: drm/nouveau/gsp: add hal for fifo.chan.doorbell_handle The doorbell register on GB20x GPUs has additional fields. 
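The reason this needs a per-GPU hook is that the handle written to the doorbell changes format on GB20x. Below is a minimal standalone sketch of the two encodings, not the in-tree code: runl_id and chan_id stand in for chan->cgrp->runl->id and chan->id, the Turing-era form matches tu102_chan_doorbell_handle() in the hunks that follow, and the GB20x form mirrors gb202_chan_doorbell_handle() added later in this series.

#include <stdint.h>
#include <stdio.h>

/* Turing through Hopper/GB10x: runlist ID shifted into bits 16+, channel ID
 * in the low 16 bits.
 */
static uint32_t turing_style_doorbell_handle(uint32_t runl_id, uint32_t chan_id)
{
	return (runl_id << 16) | chan_id;
}

/* GB20x: same layout plus the extra field (bit 30) the commit message refers to. */
static uint32_t gb20x_style_doorbell_handle(uint32_t runl_id, uint32_t chan_id)
{
	return (1u << 30) | (runl_id << 16) | chan_id;
}

int main(void)
{
	printf("turing-style: 0x%08x\n", turing_style_doorbell_handle(3, 42));
	printf("gb20x-style:  0x%08x\n", gb20x_style_doorbell_handle(3, 42));
	return 0;
}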
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | 2 ++ drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 4 +++- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c | 3 +++ 10 files changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h index a0f3277605a5..9ebb35c31db0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h @@ -6,6 +6,7 @@ #include struct nvkm_cctx; struct nvkm_cgrp; +struct nvkm_chan; struct nvkm_engn; struct nvkm_memory; struct nvkm_runl; @@ -195,6 +196,7 @@ extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc; void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info); extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault; +u32 tu102_chan_doorbell_handle(struct nvkm_chan *); int ga100_fifo_runl_ctor(struct nvkm_fifo *); int ga100_fifo_nonstall_ctor(struct nvkm_fifo *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c index 1d39a6840a40..c5a03298e88c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c @@ -31,7 +31,7 @@ #include -static u32 +u32 tu102_chan_doorbell_handle(struct nvkm_chan *chan) { return (chan->cgrp->runl->id << 16) | chan->id; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c index d699c386adec..e1ce6355c35f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c @@ -4,6 +4,8 @@ */ #include "gpu.h" +#include + #include const struct nvkm_rm_gpu @@ -21,6 +23,7 @@ ad10x_gpu = { .fifo.chan = { .class = AMPERE_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, }, .ce.class = AMPERE_DMA_COPY_B, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c index 5e7f18dbf18b..a48c6134075d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c @@ -4,6 +4,8 @@ */ #include "gpu.h" +#include + #include const struct nvkm_rm_gpu @@ -12,6 +14,7 @@ ga100_gpu = { .fifo.chan = { .class = AMPERE_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, }, .ce.class = AMPERE_DMA_COPY_A, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c index 61525d23aaa0..50536ad7f85d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c @@ -4,6 +4,8 @@ */ #include "gpu.h" +#include + #include const struct nvkm_rm_gpu @@ -21,6 +23,7 @@ ga1xx_gpu = { .fifo.chan = { .class = AMPERE_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, }, .ce.class = AMPERE_DMA_COPY_B, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c index 3a296d8fd2e0..2f517dcd721a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c @@ -4,6 +4,8 @@ */ #include "gpu.h" +#include + #include const struct nvkm_rm_gpu @@ -12,6 +14,7 @@ gb10x_gpu = { .fifo.chan = { .class = BLACKWELL_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, }, .ce.class = BLACKWELL_DMA_COPY_A, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c index 088250559e12..49e2c54e1aa8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c @@ -4,6 +4,8 @@ */ #include "gpu.h" +#include + #include const struct nvkm_rm_gpu @@ -12,6 +14,7 @@ gh100_gpu = { .fifo.chan = { .class = HOPPER_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, }, .ce.class = HOPPER_DMA_COPY_A, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index e84376c85e99..77aa7b13a3af 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -25,6 +25,7 @@ struct nvkm_rm_gpu { struct { struct { u32 class; + u32 (*doorbell_handle)(struct nvkm_chan *); } chan; } fifo; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index 4238362ec073..eaba4d50860d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -41,7 +41,9 @@ static u32 r535_chan_doorbell_handle(struct nvkm_chan *chan) { - return (chan->cgrp->runl->id << 16) | chan->id; + struct nvkm_gsp *gsp = chan->rm.object.client->gsp; + + return gsp->rm->gpu->fifo.chan.doorbell_handle(chan); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c index 883b9eddbfe6..423502f870db 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c @@ -4,6 +4,8 @@ */ #include "gpu.h" +#include + #include const struct nvkm_rm_gpu @@ -21,6 +23,7 @@ tu1xx_gpu = { .fifo.chan = { .class = TURING_CHANNEL_GPFIFO_A, + .doorbell_handle = tu102_chan_doorbell_handle, }, .ce.class = TURING_DMA_COPY_A, -- cgit v1.2.3 From 284ad706ad2f50974d66dd1a22e985a5a4d329de Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Feb 2025 08:54:57 +1000 Subject: drm/nouveau: add support for GB20x This commit adds support for the GB20x GPUs found on GeForce RTX 50xx series boards. Beyond a few miscellaneous register moves and HW class ID plumbing, this reuses most of the code added to support GH100/GB10x. 
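One example of the "register moves" mentioned above is the sysmem flush-page setup: GB10x programs it through the HSHUB aperture (gb100_fb_sysmem_flush_page_init() earlier in this series), while GB20x writes fixed FBHUB0 offsets (gb202_fb_sysmem_flush_page_init() in the hunks below). A standalone sketch of the two write sequences follows; wr32() is a made-up stand-in for nvkm_wr32(), while the offsets come from the dev_hshub_base.h and dev_fbhub.h headers added in this series.

#include <stdint.h>

/* Hypothetical MMIO helper standing in for nvkm_wr32(). */
static void wr32(volatile uint32_t *bar0, uint32_t addr, uint32_t val)
{
	bar0[addr / 4] = val;
}

/* GB10x: registers sit behind the HSHUB0 aperture at 0x00870000. */
static void gb10x_style_flush_page_init(volatile uint32_t *bar0, uint64_t addr)
{
	const uint32_t hshub = 0x00870000;	/* NV_PFB_HSHUB0 low bound */
	const uint32_t hi = (uint32_t)(addr >> 32);
	const uint32_t lo = (uint32_t)addr;

	wr32(bar0, hshub + 0x0e54, hi);	/* NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI */
	wr32(bar0, hshub + 0x0e50, lo);	/* NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO */
	wr32(bar0, hshub + 0x06c4, hi);	/* NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI */
	wr32(bar0, hshub + 0x06c0, lo);	/* NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO */
}

/* GB20x: same programming model, but the registers moved to FBHUB0. */
static void gb20x_style_flush_page_init(volatile uint32_t *bar0, uint64_t addr)
{
	wr32(bar0, 0x008a1d5c, (uint32_t)(addr >> 32));	/* NV_PFB_FBHUB0_..._ADDR_HI */
	wr32(bar0, 0x008a1d58, (uint32_t)addr);		/* NV_PFB_FBHUB0_..._ADDR_LO */
}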
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- .../drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h | 18 +++++ .../drm/nouveau/include/nvhw/ref/gb202/dev_ce.h | 12 +++ .../drm/nouveau/include/nvhw/ref/gb202/dev_therm.h | 17 +++++ drivers/gpu/drm/nouveau/include/nvif/class.h | 15 ++++ drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h | 1 + drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 1 + drivers/gpu/drm/nouveau/nouveau_bo.c | 1 + drivers/gpu/drm/nouveau/nouveau_chan.c | 1 + drivers/gpu/drm/nouveau/nouveau_drm.c | 1 + drivers/gpu/drm/nouveau/nvif/user.c | 9 ++- drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c | 16 ++++ drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h | 2 + drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 86 ++++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c | 14 ++++ drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | 2 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c | 30 ++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c | 45 +++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c | 38 ++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild | 1 + drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c | 44 +++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h | 2 + .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c | 16 ++++ .../gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c | 16 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h | 1 + 32 files changed, 393 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h new file mode 100644 index 000000000000..4d0bb8e14298 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef __gb10b_dev_fb_h__ +#define __gb10b_dev_fb_h__ + +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO 0x008a1d58 /* RW-4R */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xffffff00 /* RW--V */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI 0x008a1d5c /* RW-4R */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000fffff /* RW--V */ + +#endif // __gb10b_dev_fb_h__ + diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h new file mode 100644 index 000000000000..b09f04b31738 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gb202_dev_ce_h__ +#define __gb202_dev_ce_h__ + +#define NV_CE_GRCE_MASK 0x001040d8 /* C--4R */ +#define NV_CE_GRCE_MASK_VALUE 9:0 /* C--VF */ +#define NV_CE_GRCE_MASK_VALUE_INIT 0x00f /* C---V */ + +#endif // __gb202_dev_ce_h__ diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h new file mode 100644 index 000000000000..ed359cb528fb --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef __gb202_dev_therm_h__ +#define __gb202_dev_therm_h__ + +#define NV_THERM_I2CS_SCRATCH 0x00ad00bc /* RW-4R */ +#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */ +#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0 +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF +#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000 + +#endif // __gb202_dev_therm_h__ + diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 606483fc850b..ff6823cb2cd8 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -65,6 +65,7 @@ #define TURING_USERMODE_A 0x0000c461 #define AMPERE_USERMODE_A 0x0000c561 #define HOPPER_USERMODE_A 0x0000c661 +#define BLACKWELL_USERMODE_A 0x0000c761 #define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069 #define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369 @@ -89,6 +90,7 @@ #define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f #define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f #define BLACKWELL_CHANNEL_GPFIFO_A 0x0000c96f +#define BLACKWELL_CHANNEL_GPFIFO_B 0x0000ca6f #define NV50_DISP /* if0010.h */ 0x00005070 #define G82_DISP /* if0010.h */ 0x00008270 @@ -106,8 +108,10 @@ #define TU102_DISP /* if0010.h */ 0x0000c570 #define GA102_DISP /* if0010.h */ 0x0000c670 #define AD102_DISP /* if0010.h */ 0x0000c770 +#define GB202_DISP 0x0000ca70 #define GV100_DISP_CAPS 0x0000c373 +#define GB202_DISP_CAPS 0x0000ca73 #define NV31_MPEG 0x00003174 #define G82_MPEG 0x00008274 @@ -122,6 +126,7 @@ #define GV100_DISP_CURSOR /* if0014.h */ 0x0000c37a #define TU102_DISP_CURSOR /* if0014.h */ 0x0000c57a #define 
GA102_DISP_CURSOR /* if0014.h */ 0x0000c67a +#define GB202_DISP_CURSOR 0x0000ca7a #define NV50_DISP_OVERLAY /* if0014.h */ 0x0000507b #define G82_DISP_OVERLAY /* if0014.h */ 0x0000827b @@ -132,6 +137,7 @@ #define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c37b #define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c57b #define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c67b +#define GB202_DISP_WINDOW_IMM_CHANNEL_DMA 0x0000ca7b #define NV50_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000507c #define G82_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000827c @@ -157,6 +163,7 @@ #define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d #define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d #define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d +#define GB202_DISP_CORE_CHANNEL_DMA 0x0000ca7d #define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e #define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e @@ -168,6 +175,7 @@ #define GV100_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c37e #define TU102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c57e #define GA102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c67e +#define GB202_DISP_WINDOW_CHANNEL_DMA 0x0000ca7e #define NV50_TESLA 0x00005097 #define G82_TESLA 0x00008297 @@ -201,6 +209,7 @@ #define HOPPER_A 0x0000cb97 #define BLACKWELL_A 0x0000cd97 +#define BLACKWELL_B 0x0000ce97 #define NV74_BSP 0x000074b0 @@ -210,6 +219,7 @@ #define NVC7B0_VIDEO_DECODER 0x0000c7b0 #define NVC9B0_VIDEO_DECODER 0x0000c9b0 #define NVCDB0_VIDEO_DECODER 0x0000cdb0 +#define NVCFB0_VIDEO_DECODER 0x0000cfb0 #define GT212_MSVLD 0x000085b1 #define IGT21A_MSVLD 0x000086b1 @@ -240,10 +250,12 @@ #define AMPERE_DMA_COPY_B 0x0000c7b5 #define HOPPER_DMA_COPY_A 0x0000c8b5 #define BLACKWELL_DMA_COPY_A 0x0000c9b5 +#define BLACKWELL_DMA_COPY_B 0x0000cab5 #define NVC4B7_VIDEO_ENCODER 0x0000c4b7 #define NVC7B7_VIDEO_ENCODER 0x0000c7b7 #define NVC9B7_VIDEO_ENCODER 0x0000c9b7 +#define NVCFB7_VIDEO_ENCODER 0x0000cfb7 #define FERMI_DECOMPRESS 0x000090b8 @@ -264,6 +276,7 @@ #define ADA_COMPUTE_A 0x0000c9c0 #define HOPPER_COMPUTE_A 0x0000cbc0 #define BLACKWELL_COMPUTE_A 0x0000cdc0 +#define BLACKWELL_COMPUTE_B 0x0000cec0 #define NV74_CIPHER 0x000074c1 @@ -271,10 +284,12 @@ #define NVC4D1_VIDEO_NVJPG 0x0000c4d1 #define NVC9D1_VIDEO_NVJPG 0x0000c9d1 #define NVCDD1_VIDEO_NVJPG 0x0000cdd1 +#define NVCFD1_VIDEO_NVJPG 0x0000cfd1 #define NVB8FA_VIDEO_OFA 0x0000b8fa #define NVC6FA_VIDEO_OFA 0x0000c6fa #define NVC7FA_VIDEO_OFA 0x0000c7fa #define NVC9FA_VIDEO_OFA 0x0000c9fa #define NVCDFA_VIDEO_OFA 0x0000cdfa +#define NVCFFA_VIDEO_OFA 0x0000cffa #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 0664195e5684..99579e7b9376 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -49,6 +49,7 @@ struct nvkm_device { GH100 = 0x180, AD100 = 0x190, GB10x = 0x1a0, + GB20x = 0x1b0, } card_type; u32 chipset; u8 chiprev; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 7bd73f9f749b..e0d777a933e1 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -104,6 +104,7 @@ int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); int gh100_fb_new(struct nvkm_device *, enum 
nvkm_subdev_type, int inst, struct nvkm_fb **); int gb100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gb202_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); #include #include diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h index 7122f814e4d0..8a3dbb1cbb46 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h @@ -20,4 +20,5 @@ int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); int gb100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); +int gb202_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index c5982fd74725..226c7ec56b8e 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -493,4 +493,5 @@ int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_ int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); +int gb202_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 1a9a74c26e8e..b96f0555ca14 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1000,6 +1000,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { + { "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init }, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 726de0aa0fcf..b1e92b1f7a26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -249,6 +249,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, struct nouveau_channel **pchan) { const struct nvif_mclass hosts[] = { + { BLACKWELL_CHANNEL_GPFIFO_B, 0 }, { BLACKWELL_CHANNEL_GPFIFO_A, 0 }, { HOPPER_CHANNEL_GPFIFO_A, 0 }, { AMPERE_CHANNEL_GPFIFO_B, 0 }, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0be604af5b29..0c82a63cd49d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -511,6 +511,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case AMPERE_CHANNEL_GPFIFO_B: case HOPPER_CHANNEL_GPFIFO_A: case BLACKWELL_CHANNEL_GPFIFO_A: + case BLACKWELL_CHANNEL_GPFIFO_B: ret = gv100_fence_create(drm); break; default: diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c index ae470a1fdfb8..53f03fa1c9c2 100644 --- a/drivers/gpu/drm/nouveau/nvif/user.c +++ b/drivers/gpu/drm/nouveau/nvif/user.c @@ -41,10 +41,11 @@ nvif_user_ctor(struct nvif_device *device, const char *name) int 
version; const struct nvif_user_func *func; } users[] = { - { HOPPER_USERMODE_A, -1, &nvif_userc361 }, - { AMPERE_USERMODE_A, -1, &nvif_userc361 }, - { TURING_USERMODE_A, -1, &nvif_userc361 }, - { VOLTA_USERMODE_A, -1, &nvif_userc361 }, + { BLACKWELL_USERMODE_A, -1, &nvif_userc361 }, + { HOPPER_USERMODE_A, -1, &nvif_userc361 }, + { AMPERE_USERMODE_A, -1, &nvif_userc361 }, + { TURING_USERMODE_A, -1, &nvif_userc361 }, + { VOLTA_USERMODE_A, -1, &nvif_userc361 }, {} }; int cid, ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild index 8bf1635ffabc..9754bac65df7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild @@ -10,3 +10,4 @@ nvkm-y += nvkm/engine/ce/gv100.o nvkm-y += nvkm/engine/ce/tu102.o nvkm-y += nvkm/engine/ce/ga100.o nvkm-y += nvkm/engine/ce/ga102.o +nvkm-y += nvkm/engine/ce/gb202.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c new file mode 100644 index 000000000000..37c3c619c71b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include +#include + +u32 +gb202_ce_grce_mask(struct nvkm_device *device) +{ + u32 data = nvkm_rd32(device, NV_CE_GRCE_MASK); + + return NVVAL_GET(data, NV_CE, GRCE_MASK, VALUE); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h index 806a76a72249..34fd2657134b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h @@ -16,4 +16,6 @@ int ga100_ce_oneinit(struct nvkm_engine *); int ga100_ce_init(struct nvkm_engine *); int ga100_ce_fini(struct nvkm_engine *, bool); int ga100_ce_nonstall(struct nvkm_engine *); + +u32 gb202_ce_grce_mask(struct nvkm_device *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index f6f23cbc7a73..3375a59ebf1a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2832,6 +2832,86 @@ nv1a2_chipset = { .fifo = { 0x00000001, ga102_fifo_new }, }; +static const struct nvkm_device_chip +nv1b2_chipset = { + .name = "GB202", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb202_fb_new }, + .fsp = { 0x00000001, gb202_fsp_new }, + .gsp = { 0x00000001, gb202_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .disp = { 0x00000001, ga102_disp_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip +nv1b3_chipset = { + .name = "GB203", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb202_fb_new }, + .fsp = { 0x00000001, gb202_fsp_new }, + .gsp = { 0x00000001, gb202_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .disp = { 0x00000001, ga102_disp_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip +nv1b5_chipset = { + .name = "GB205", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb202_fb_new }, + 
.fsp = { 0x00000001, gb202_fsp_new }, + .gsp = { 0x00000001, gb202_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .disp = { 0x00000001, ga102_disp_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip +nv1b6_chipset = { + .name = "GB206", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb202_fb_new }, + .fsp = { 0x00000001, gb202_fsp_new }, + .gsp = { 0x00000001, gb202_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .disp = { 0x00000001, ga102_disp_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip +nv1b7_chipset = { + .name = "GB207", + .bar = { 0x00000001, tu102_bar_new }, + .fb = { 0x00000001, gb202_fb_new }, + .fsp = { 0x00000001, gb202_fsp_new }, + .gsp = { 0x00000001, gb202_gsp_new }, + .imem = { 0x00000001, gh100_instmem_new }, + .mmu = { 0x00000001, gh100_mmu_new }, + .pci = { 0x00000001, gh100_pci_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .vfn = { 0x00000001, ga100_vfn_new }, + .disp = { 0x00000001, ga102_disp_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + struct nvkm_subdev * nvkm_device_subdev(struct nvkm_device *device, int type, int inst) { @@ -3150,6 +3230,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x180: device->card_type = GH100; break; case 0x190: device->card_type = AD100; break; case 0x1a0: device->card_type = GB10x; break; + case 0x1b0: device->card_type = GB20x; break; default: break; } @@ -3260,6 +3341,11 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x197: device->chip = &nv197_chipset; break; case 0x1a0: device->chip = &nv1a0_chipset; break; case 0x1a2: device->chip = &nv1a2_chipset; break; + case 0x1b2: device->chip = &nv1b2_chipset; break; + case 0x1b3: device->chip = &nv1b3_chipset; break; + case 0x1b5: device->chip = &nv1b5_chipset; break; + case 0x1b6: device->chip = &nv1b6_chipset; break; + case 0x1b7: device->chip = &nv1b7_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { switch (device->chipset) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 07f45cc16210..58191b7a0494 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -150,6 +150,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break; case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break; case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break; + case GB20x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break; default: args->v0.family = 0; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 5a074b9970ab..376e9c3bcb1a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -25,6 +25,7 @@ nvkm-y += nvkm/engine/fifo/gv100.o nvkm-y += nvkm/engine/fifo/tu102.o nvkm-y += nvkm/engine/fifo/ga100.o nvkm-y += nvkm/engine/fifo/ga102.o +nvkm-y += nvkm/engine/fifo/gb202.o nvkm-y += nvkm/engine/fifo/ucgrp.o nvkm-y 
+= nvkm/engine/fifo/uchan.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c new file mode 100644 index 000000000000..b469e8afeb0b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" +#include "cgrp.h" +#include "chan.h" +#include "runl.h" + +u32 +gb202_chan_doorbell_handle(struct nvkm_chan *chan) +{ + return BIT(30) | (chan->cgrp->runl->id << 16) | chan->id; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h index 9ebb35c31db0..5e81ae195329 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h @@ -208,6 +208,8 @@ extern const struct nvkm_engn_func ga100_engn_ce; extern const struct nvkm_cgrp_func ga100_cgrp; extern const struct nvkm_chan_func ga100_chan; +u32 gb202_chan_doorbell_handle(struct nvkm_chan *); + int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *, void *argv, u32 argc, struct nvkm_object **); int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index b438a57f2efc..8d8a5382d1b1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -37,6 +37,7 @@ nvkm-y += nvkm/subdev/fb/ga100.o nvkm-y += nvkm/subdev/fb/ga102.o nvkm-y += nvkm/subdev/fb/gh100.o nvkm-y += nvkm/subdev/fb/gb100.o +nvkm-y += nvkm/subdev/fb/gb202.o nvkm-y += nvkm/subdev/fb/r535.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c new file mode 100644 index 000000000000..848505026d02 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +#include +#include + +static void +gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + const u64 addr = fb->sysmem.flush_page_addr; + + nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr)); + nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr)); +} + +static const struct nvkm_fb_func +gb202_fb = { + .sysmem.flush_page_init = gb202_fb_sysmem_flush_page_init, + .vidmem.size = ga102_fb_vidmem_size, +}; + +int +gb202_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) +{ + return r535_fb_new(&gb202_fb, device, type, inst, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild index 337772acdddc..1a9ded3a86f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild @@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/fsp/base.o nvkm-y += nvkm/subdev/fsp/gh100.o nvkm-y += nvkm/subdev/fsp/gb100.o +nvkm-y += nvkm/subdev/fsp/gb202.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c new file mode 100644 index 000000000000..3438aac6383e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. 
All rights reserved. + */ +#include "priv.h" + +#include +#include + +static int +gb202_fsp_wait_secure_boot(struct nvkm_fsp *fsp) +{ + struct nvkm_device *device = fsp->subdev.device; + unsigned timeout_ms = 4000; + + do { + u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS); + + if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS) + return 0; + + usleep_range(1000, 2000); + } while (timeout_ms--); + + return -ETIMEDOUT; +} + +static const struct nvkm_fsp_func +gb202_fsp = { + .wait_secure_boot = gb202_fsp_wait_secure_boot, + .cot = { + .version = 2, + .size_hash = 48, + .size_pkey = 97, + .size_sig = 96, + .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc, + }, +}; + +int +gb202_fsp_new(struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp) +{ + return nvkm_fsp_new_(&gb202_fsp, device, type, inst, pfsp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild index 4aebea4f6a64..e9c948b67bbd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild @@ -10,5 +10,6 @@ nvkm-y += nvkm/subdev/gsp/ga102.o nvkm-y += nvkm/subdev/gsp/gh100.o nvkm-y += nvkm/subdev/gsp/ad102.o nvkm-y += nvkm/subdev/gsp/gb100.o +nvkm-y += nvkm/subdev/gsp/gb202.o include $(src)/nvkm/subdev/gsp/rm/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c new file mode 100644 index 000000000000..c1d718172ddf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "priv.h" + +static const struct nvkm_gsp_func +gb202_gsp = { + .flcn = &ga102_gsp_flcn, + + .sig_section = ".fwsignature_gb20x", + + .dtor = r535_gsp_dtor, + .oneinit = gh100_gsp_oneinit, + .init = gh100_gsp_init, + .fini = gh100_gsp_fini, + + .rm.gpu = &gb20x_gpu, +}; + +static struct nvkm_gsp_fwif +gb202_gsps[] = { + { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true }, + {} +}; + +int +gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) +{ + return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp); +} + +NVKM_GSP_FIRMWARE_FMC(gb202, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb203, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb205, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb206, 570.144); +NVKM_GSP_FIRMWARE_FMC(gb207, 570.144); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild index 2efef4b694d6..04037394a2da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild @@ -13,6 +13,7 @@ nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o nvkm-y += nvkm/subdev/gsp/rm/ad10x.o nvkm-y += nvkm/subdev/gsp/rm/gh100.o nvkm-y += nvkm/subdev/gsp/rm/gb10x.o +nvkm-y += nvkm/subdev/gsp/rm/gb20x.o include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c new file mode 100644 index 000000000000..950471d9996e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "gpu.h" + +#include +#include + +#include + +const struct nvkm_rm_gpu +gb20x_gpu = { + .disp.class = { + .root = GB202_DISP, + .caps = GB202_DISP_CAPS, + .core = GB202_DISP_CORE_CHANNEL_DMA, + .wndw = GB202_DISP_WINDOW_CHANNEL_DMA, + .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA, + .curs = GB202_DISP_CURSOR, + }, + + .usermode.class = BLACKWELL_USERMODE_A, + + .fifo.chan = { + .class = BLACKWELL_CHANNEL_GPFIFO_B, + .doorbell_handle = gb202_chan_doorbell_handle, + }, + + .ce = { + .class = BLACKWELL_DMA_COPY_B, + .grce_mask = gb202_ce_grce_mask, + }, + .gr.class = { + .i2m = BLACKWELL_INLINE_TO_MEMORY_A, + .twod = FERMI_TWOD_A, + .threed = BLACKWELL_B, + .compute = BLACKWELL_COMPUTE_B, + }, + .nvdec.class = NVCFB0_VIDEO_DECODER, + .nvenc.class = NVCFB7_VIDEO_ENCODER, + .nvjpg.class = NVCFD1_VIDEO_NVJPG, + .ofa.class = NVCFFA_VIDEO_OFA, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h index 77aa7b13a3af..46a6325641b7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h @@ -31,6 +31,7 @@ struct nvkm_rm_gpu { struct { u32 class; + u32 (*grce_mask)(struct nvkm_device *); } ce; struct { @@ -65,4 +66,5 @@ extern const struct nvkm_rm_gpu ga1xx_gpu; extern const struct nvkm_rm_gpu ad10x_gpu; extern const struct nvkm_rm_gpu gh100_gpu; extern const struct nvkm_rm_gpu gb10x_gpu; +extern const struct nvkm_rm_gpu gb20x_gpu; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c index eaba4d50860d..1ac5628c5140 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c @@ -507,6 +507,22 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo) continue; } + /* Skip SW engine - there's currently no support for NV SW classes. */ + if (type == NVKM_ENGINE_SW) + continue; + + /* Skip lone GRCEs (ones not paired with GR on a runlist), as they + * don't appear to function as async copy engines. 
+ */ + if (type == NVKM_ENGINE_CE && + rm->gpu->ce.grce_mask && + (rm->gpu->ce.grce_mask(device) & BIT(inst)) && + !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) { + RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst); + nvkm_runl_del(runl); + continue; + } + ret = nvkm_rm_engine_new(gsp->rm, type, inst); if (ret) { nvkm_runl_del(runl); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c index 7e5b411fa543..498658d0c60c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c @@ -39,6 +39,16 @@ r570_wpr_libos3_gb10x = { .offset_set_by_acr = true, }; +static const struct nvkm_rm_wpr +r570_wpr_libos3_gb20x = { + .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL, + .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100, + .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + .heap_size_non_wpr = 0x220000, + .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000), + .offset_set_by_acr = true, +}; + static const struct nvkm_rm_api r570_api = { .gsp = &r570_gsp, @@ -81,3 +91,9 @@ r570_rm_gb10x = { .wpr = &r570_wpr_libos3_gb10x, .api = &r570_api, }; + +const struct nvkm_rm_impl +r570_rm_gb20x = { + .wpr = &r570_wpr_libos3_gb20x, + .api = &r570_api, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h index 2e9bd74d39be..393ea775941f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h @@ -178,6 +178,7 @@ extern const struct nvkm_rm_impl r570_rm_tu102; extern const struct nvkm_rm_impl r570_rm_ga102; extern const struct nvkm_rm_impl r570_rm_gh100; extern const struct nvkm_rm_impl r570_rm_gb10x; +extern const struct nvkm_rm_impl r570_rm_gb20x; extern const struct nvkm_rm_api_gsp r570_gsp; extern const struct nvkm_rm_api_client r570_client; extern const struct nvkm_rm_api_fbsr r570_fbsr; -- cgit v1.2.3 From f0ddbb1eed1898286d2bd99fd6ab64ca9700d267 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Mar 2025 01:56:03 +1000 Subject: drm/dp: add option to disable zero sized address only transactions. Some older NVIDIA and some newer NVIDIA hardware/firmware seems to have issues with address only transactions (firmware rejects them). Add an option to the core drm dp to avoid address only transactions, This just puts the MOT flag removal on the last message of the transfer and avoids the start of transfer transaction. This with the flag set in nouveau, allows eDP probing on GB203 device. Signed-off-by: Dave Airlie Reviewed-by: Ben Skeggs Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/display/drm_dp_helper.c | 39 +++++++++++++++++++-------------- include/drm/display/drm_dp_helper.h | 5 +++++ 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 56c7e3318f01..f2a6559a2710 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -2137,14 +2137,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, for (i = 0; i < num; i++) { msg.address = msgs[i].addr; - drm_dp_i2c_msg_set_request(&msg, &msgs[i]); - /* Send a bare address packet to start the transaction. - * Zero sized messages specify an address only (bare - * address) transaction. 
- */ - msg.buffer = NULL; - msg.size = 0; - err = drm_dp_i2c_do_msg(aux, &msg); + + if (!aux->no_zero_sized) { + drm_dp_i2c_msg_set_request(&msg, &msgs[i]); + /* Send a bare address packet to start the transaction. + * Zero sized messages specify an address only (bare + * address) transaction. + */ + msg.buffer = NULL; + msg.size = 0; + err = drm_dp_i2c_do_msg(aux, &msg); + } /* * Reset msg.request in case in case it got @@ -2163,6 +2166,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, msg.buffer = msgs[i].buf + j; msg.size = min(transfer_size, msgs[i].len - j); + if (j + msg.size == msgs[i].len && aux->no_zero_sized) + msg.request &= ~DP_AUX_I2C_MOT; err = drm_dp_i2c_drain_msg(aux, &msg); /* @@ -2180,15 +2185,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, } if (err >= 0) err = num; - /* Send a bare address packet to close out the transaction. - * Zero sized messages specify an address only (bare - * address) transaction. - */ - msg.request &= ~DP_AUX_I2C_MOT; - msg.buffer = NULL; - msg.size = 0; - (void)drm_dp_i2c_do_msg(aux, &msg); + if (!aux->no_zero_sized) { + /* Send a bare address packet to close out the transaction. + * Zero sized messages specify an address only (bare + * address) transaction. + */ + msg.request &= ~DP_AUX_I2C_MOT; + msg.buffer = NULL; + msg.size = 0; + (void)drm_dp_i2c_do_msg(aux, &msg); + } return err; } diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 7b19192c7031..e4ca35143ff9 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -518,6 +518,11 @@ struct drm_dp_aux { * @powered_down: If true then the remote endpoint is powered down. */ bool powered_down; + + /** + * @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA) + */ + bool no_zero_sized; }; int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset); -- cgit v1.2.3 From 6cc6e08d4542473d79ea83d9123d9d35e9c01c92 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Feb 2025 08:55:17 +1000 Subject: drm/nouveau/kms: add support for GB20x Adds basic support for the new display classes available on GB20x GPUs. Most of the changes here deal with HW method moves, with the only other change of note being tweaks to skip allocation of CTXDMA objects, which aren't required on Blackwell display. 
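
As a rough sketch (not part of the patch itself) of the gating behind the
CTXDMA change: Blackwell display methods take physical addresses directly,
so DMA objects are only constructed for pre-GB202 display classes, roughly
along the lines of the hypothetical helper below (the series open-codes
equivalent comparisons against the GB202_DISP / GB202_DISP_* class values
from the nvif class headers).

	/* Hypothetical helper, for illustration only: decide whether a
	 * display class still needs NV_DMA_IN_MEMORY ctxdma objects.
	 */
	static inline bool
	nv50_disp_needs_ctxdma(s32 disp_oclass)
	{
		/* GB202 (Blackwell) and later address surfaces physically,
		 * so no ctxdma objects are required.
		 */
		return disp_oclass < GB202_DISP;
	}

Callers of such a check would either allocate the usual kmsSyncCtxDma /
kmsVramCtxDma objects as before, or, on Blackwell, fill in placeholder
handles, since a non-NULL handle is still used elsewhere as an enable flag.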
Signed-off-by: Ben Skeggs Reviewed-by: Dave Airlie Reviewed-by: Timur Tabi Tested-by: Timur Tabi Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/dispnv50/Kbuild | 4 + drivers/gpu/drm/nouveau/dispnv50/core.c | 1 + drivers/gpu/drm/nouveau/dispnv50/core.h | 6 + drivers/gpu/drm/nouveau/dispnv50/core507d.c | 1 + drivers/gpu/drm/nouveau/dispnv50/corec37d.c | 3 +- drivers/gpu/drm/nouveau/dispnv50/corec57d.c | 2 + drivers/gpu/drm/nouveau/dispnv50/coreca7d.c | 122 +++ drivers/gpu/drm/nouveau/dispnv50/crc.c | 4 + drivers/gpu/drm/nouveau/dispnv50/crc.h | 1 + drivers/gpu/drm/nouveau/dispnv50/crcca7d.c | 98 +++ drivers/gpu/drm/nouveau/dispnv50/curs.c | 1 + drivers/gpu/drm/nouveau/dispnv50/disp.c | 10 + drivers/gpu/drm/nouveau/dispnv50/head.c | 1 + drivers/gpu/drm/nouveau/dispnv50/head.h | 5 + drivers/gpu/drm/nouveau/dispnv50/headc57d.c | 2 +- drivers/gpu/drm/nouveau/dispnv50/headca7d.c | 297 +++++++ drivers/gpu/drm/nouveau/dispnv50/wimm.c | 1 + drivers/gpu/drm/nouveau/dispnv50/wndw.c | 25 +- drivers/gpu/drm/nouveau/dispnv50/wndw.h | 3 + drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c | 1 + drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c | 209 +++++ .../gpu/drm/nouveau/include/nvhw/class/clc97b.h | 22 + .../gpu/drm/nouveau/include/nvhw/class/clca7d.h | 868 +++++++++++++++++++++ .../gpu/drm/nouveau/include/nvhw/class/clca7e.h | 137 ++++ drivers/gpu/drm/nouveau/include/nvif/pushc97b.h | 18 + drivers/gpu/drm/nouveau/nouveau_connector.c | 2 + drivers/gpu/drm/nouveau/nvif/disp.c | 1 + 27 files changed, 1836 insertions(+), 9 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/dispnv50/coreca7d.c create mode 100644 drivers/gpu/drm/nouveau/dispnv50/crcca7d.c create mode 100644 drivers/gpu/drm/nouveau/dispnv50/headca7d.c create mode 100644 drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h create mode 100644 drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h create mode 100644 drivers/gpu/drm/nouveau/include/nvif/pushc97b.h diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild index 28be2912ff74..d5049dee4b8c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild +++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild @@ -9,11 +9,13 @@ nouveau-y += dispnv50/core907d.o nouveau-y += dispnv50/core917d.o nouveau-y += dispnv50/corec37d.o nouveau-y += dispnv50/corec57d.o +nouveau-y += dispnv50/coreca7d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o +nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcca7d.o nouveau-y += dispnv50/dac507d.o nouveau-y += dispnv50/dac907d.o @@ -31,6 +33,7 @@ nouveau-y += dispnv50/head907d.o nouveau-y += dispnv50/head917d.o nouveau-y += dispnv50/headc37d.o nouveau-y += dispnv50/headc57d.o +nouveau-y += dispnv50/headca7d.o nouveau-y += dispnv50/wimm.o nouveau-y += dispnv50/wimmc37b.o @@ -39,6 +42,7 @@ nouveau-y += dispnv50/wndw.o nouveau-y += dispnv50/wndwc37e.o nouveau-y += dispnv50/wndwc57e.o nouveau-y += dispnv50/wndwc67e.o +nouveau-y += dispnv50/wndwca7e.o nouveau-y += dispnv50/base.o nouveau-y += dispnv50/base507c.o diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c index f045515696cb..c6331bf97582 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core.c @@ -42,6 +42,7 @@ nv50_core_new(struct 
nouveau_drm *drm, struct nv50_core **pcore) int version; int (*new)(struct nouveau_drm *, s32, struct nv50_core **); } cores[] = { + { GB202_DISP_CORE_CHANNEL_DMA, 0, coreca7d_new }, { AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new }, { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new }, { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new }, diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h index f75088186fba..aa07a3ad5dfd 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core.h +++ b/drivers/gpu/drm/nouveau/dispnv50/core.h @@ -7,7 +7,10 @@ struct nv50_core { const struct nv50_core_func *func; + struct nv50_disp *disp; + struct nv50_dmac chan; + bool assign_windows; }; @@ -18,6 +21,7 @@ struct nv50_core_func { int (*init)(struct nv50_core *); void (*ntfy_init)(struct nouveau_bo *, u32 offset); int (*caps_init)(struct nouveau_drm *, struct nv50_disp *); + u32 caps_class; int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset, struct nvif_device *); int (*update)(struct nv50_core *, u32 *interlock, bool ntfy); @@ -70,4 +74,6 @@ int corec37d_wndw_owner(struct nv50_core *); extern const struct nv50_outp_func sorc37d; int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **); + +int coreca7d_new(struct nouveau_drm *, s32, struct nv50_core **); #endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c index ce2cb78bbdd3..4b947b67a844 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c @@ -165,6 +165,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm, if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL))) return -ENOMEM; core->func = func; + core->disp = disp; ret = nv50_dmac_create(drm, &oclass, 0, &args, sizeof(args), diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c index 7f637b8830be..83eec2f091f0 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c @@ -105,7 +105,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp) int ret; ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0, - GV100_DISP_CAPS, NULL, 0, &disp->caps); + disp->core->func->caps_class, NULL, 0, &disp->caps); if (ret) { NV_ERROR(drm, "Failed to init notifier caps region: %d\n", @@ -162,6 +162,7 @@ corec37d = { .init = corec37d_init, .ntfy_init = corec37d_ntfy_init, .caps_init = corec37d_caps_init, + .caps_class = GV100_DISP_CAPS, .ntfy_wait_done = corec37d_ntfy_wait_done, .update = corec37d_update, .wndw.owner = corec37d_wndw_owner, diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c index 421d0d57e1d8..39be576eadcb 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c @@ -22,6 +22,7 @@ #include "core.h" #include "head.h" +#include #include #include @@ -63,6 +64,7 @@ corec57d = { .init = corec57d_init, .ntfy_init = corec37d_ntfy_init, .caps_init = corec37d_caps_init, + .caps_class = GV100_DISP_CAPS, .ntfy_wait_done = corec37d_ntfy_wait_done, .update = corec37d_update, .wndw.owner = corec37d_wndw_owner, diff --git a/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c new file mode 100644 index 000000000000..171727be400e --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. 
All rights reserved. + */ +#include "core.h" +#include "head.h" + +#include +#include + +#include + +#include + +static int +coreca7d_update(struct nv50_core *core, u32 *interlock, bool ntfy) +{ + const u64 ntfy_addr = core->disp->sync->offset + NV50_DISP_CORE_NTFY; + const u32 ntfy_hi = upper_32_bits(ntfy_addr); + const u32 ntfy_lo = lower_32_bits(ntfy_addr); + struct nvif_push *push = &core->chan.push; + int ret; + + ret = PUSH_WAIT(push, 5 + (ntfy ? 5 + 2 : 0)); + if (ret) + return ret; + + if (ntfy) { + PUSH_MTHD(push, NVCA7D, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi, + + SET_SURFACE_ADDRESS_LO_NOTIFIER, + NVVAL(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) | + NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) | + NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE)); + + PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL, + NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, MODE, WRITE) | + NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE)); + } + + PUSH_MTHD(push, NVCA7D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS], + SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]); + + PUSH_MTHD(push, NVCA7D, UPDATE, + NVDEF(NVCA7D, UPDATE, RELEASE_ELV, TRUE) | + NVDEF(NVCA7D, UPDATE, SPECIAL_HANDLING, NONE) | + NVDEF(NVCA7D, UPDATE, INHIBIT_INTERRUPTS, FALSE)); + + if (ntfy) { + PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL, + NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE)); + } + + return PUSH_KICK(push); +} + +static int +coreca7d_init(struct nv50_core *core) +{ + struct nvif_push *push = &core->chan.push; + const u32 windows = 8, heads = 4; + int ret, i; + + ret = PUSH_WAIT(push, windows * 6 + heads * 6); + if (ret) + return ret; + + for (i = 0; i < windows; i++) { + PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i), + NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) | + NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) | + NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) | + NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE), + + WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000); + + PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i), + NVVAL(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) | + NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) | + NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) | + NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE), + + WINDOW_SET_PHYSICAL(i), BIT(i)); + } + + for (i = 0; i < heads; i++) { + PUSH_MTHD(push, NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS(i), + NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) | + NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) | + NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) | + NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_TILE_MASK(i), BIT(i)); + + PUSH_MTHD(push, NVCA7D, TILE_SET_TILE_SIZE(i), 0); + } + + core->assign_windows = true; + return PUSH_KICK(push); +} + +static const struct nv50_core_func +coreca7d = { + .init = coreca7d_init, + .ntfy_init = corec37d_ntfy_init, + .caps_init = corec37d_caps_init, + .caps_class = GB202_DISP_CAPS, + .ntfy_wait_done = corec37d_ntfy_wait_done, + .update = coreca7d_update, + .wndw.owner = corec37d_wndw_owner, + .head = &headca7d, + .sor = &sorc37d, +#if IS_ENABLED(CONFIG_DEBUG_FS) + .crc = &crcca7d, 
+#endif +}; + +int +coreca7d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore) +{ + return core507d_new_(&coreca7d, drm, oclass, pcore); +} diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c index 5936b6b3b15d..deb6af40ef32 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c @@ -509,6 +509,10 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu, if (ret) return ret; + /* No CTXDMAs on Blackwell. */ + if (core->chan.base.user.oclass >= GB202_DISP_CORE_CHANNEL_DMA) + return 0; + ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma", NV50_DISP_HANDLE_CRC_CTX(head, idx), NV_DMA_IN_MEMORY, diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h index 4823f1fde2dd..75a2009e8193 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.h +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h @@ -94,6 +94,7 @@ void nv50_crc_atomic_clr(struct nv50_head *); extern const struct nv50_crc_func crc907d; extern const struct nv50_crc_func crcc37d; extern const struct nv50_crc_func crcc57d; +extern const struct nv50_crc_func crcca7d; #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ struct nv50_crc {}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c new file mode 100644 index 000000000000..912f59aebe87 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#include "crcc37d.h" +#include "core.h" +#include "head.h" + +#include + +#include + +static int +crcca7d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, ctx ? 
3 : 2); + if (ret) + return ret; + + if (ctx) { + const u32 crc_hi = upper_32_bits(ctx->mem.addr); + const u32 crc_lo = lower_32_bits(ctx->mem.addr); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CRC(i), crc_hi, + + HEAD_SET_SURFACE_ADDRESS_LO_CRC(i), + NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ADDRESS_LO, crc_lo >> 4) | + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, TARGET, PHYSICAL_NVM) | + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, ENABLE)); + } else { + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC(i), + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, DISABLE)); + } + + return 0; +} + +static int +crcca7d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int primary_crc, ret; + + if (!source) { + ret = PUSH_WAIT(push, 1); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i), 0); + + return crcca7d_set_ctx(head, NULL); + } + + switch (source) { + case NV50_CRC_SOURCE_TYPE_SOR: + primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(or); + break; + case NV50_CRC_SOURCE_TYPE_SF: + primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF; + break; + default: + break; + } + + ret = crcca7d_set_ctx(head, ctx); + if (ret) + return ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i), + NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) | + NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | + NVVAL(NVCA7D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, primary_crc) | + NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) | + NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE)); + + return 0; +} + +const struct nv50_crc_func +crcca7d = { + .set_src = crcca7d_set_src, + .set_ctx = crcca7d_set_ctx, + .get_entry = crcc37d_get_entry, + .ctx_finished = crcc37d_ctx_finished, + .flip_threshold = CRCC37D_FLIP_THRESHOLD, + .num_entries = CRCC37D_MAX_ENTRIES, + .notifier_len = sizeof(struct crcc37d_notifier), +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c index 31d8b2e4791d..557bd05240fa 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c @@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) int version; int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **); } curses[] = { + { GB202_DISP_CURSOR, 0, cursc37a_new }, { GA102_DISP_CURSOR, 0, cursc37a_new }, { TU102_DISP_CURSOR, 0, cursc37a_new }, { GV100_DISP_CURSOR, 0, cursc37a_new }, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 10485510b539..e5d37eee4301 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -279,6 +279,16 @@ nv50_dmac_create(struct nouveau_drm *drm, if (syncbuf < 0) return 0; + /* No CTXDMAs on Blackwell. */ + if (disp->oclass >= GB202_DISP) { + /* "handle != NULL_HANDLE" is used to determine enable status + * in a number of places, so fill in some fake object handles. 
+ */ + dmac->sync.handle = NV50_DISP_HANDLE_SYNCBUF; + dmac->vram.handle = NV50_DISP_HANDLE_VRAM; + return 0; + } + ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF, NV_DMA_IN_MEMORY, &(struct nv_dma_v0) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index d7c74cc43ba5..3dd742b4f823 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -577,6 +577,7 @@ nv50_head_create(struct drm_device *dev, int index) return ERR_PTR(-ENOMEM); head->func = disp->core->func->head; + head->disp = disp; head->base.index = index; if (disp->disp->object.oclass < GF110_DISP) diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index e9d17037ffcf..8bd2fcb1eff5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -13,6 +13,8 @@ struct nv50_head { const struct nv50_head_func *func; + struct nv50_disp *disp; + struct nouveau_crtc base; struct nv50_crc crc; struct nv50_lut olut; @@ -98,4 +100,7 @@ int headc37d_dither(struct nv50_head *, struct nv50_head_atom *); void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *); extern const struct nv50_head_func headc57d; +bool headc57d_olut(struct nv50_head *, struct nv50_head_atom *, int size); + +extern const struct nv50_head_func headca7d; #endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c index fde4087e7691..3f8ba495de8f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c @@ -182,7 +182,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -static bool +bool headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { if (size != 0 && size != 256 && size != 1024) diff --git a/drivers/gpu/drm/nouveau/dispnv50/headca7d.c b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c new file mode 100644 index 000000000000..eeaeb15aa664 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "head.h" +#include "atom.h" +#include "core.h" + +#include + +#include + +static int +headca7d_display_id(struct nv50_head *head, u32 display_id) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_DISPLAY_ID(i, 0), display_id); + + return 0; +} + +static int +headca7d_or(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + u8 depth; + int ret; + + switch (asyh->or.depth) { + case 6: + depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + break; + case 5: + depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + break; + case 2: + depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + break; + case 0: + depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + break; + default: + WARN_ON(1); + return -EINVAL; + } + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i), + NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) | + NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) | + NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE)); + + return 0; +} + +static int +headca7d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_PROCAMP(i), + NVDEF(NVCA7D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) | + NVDEF(NVCA7D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) | + NVDEF(NVCA7D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA)); + + return 0; +} + +static int +headca7d_dither(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_DITHER_CONTROL(i), + NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) | + NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) | + NVDEF(NVCA7D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) | + NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) | + NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, PHASE, 0)); + + return 0; +} + +static int +headca7d_curs_clr(struct nv50_head *head) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 4); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i), + NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) | + NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0), + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, DISABLE)); + + return 0; +} + +static int +headca7d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const u32 curs_hi = upper_32_bits(asyh->curs.offset); + 
const u32 curs_lo = lower_32_bits(asyh->curs.offset); + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 7); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(i, 0), curs_hi); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0), + NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ADDRESS_LO, curs_lo >> 4) | + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, TARGET, PHYSICAL_NVM) | + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, ENABLE)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i), + NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) | + NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0), + + HEAD_SET_CONTROL_CURSOR_COMPOSITION(i), + NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) | + NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT, + K1) | + NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT, + NEG_K1_TIMES_SRC) | + NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND)); + + return 0; +} + +static int +headca7d_olut_clr(struct nv50_head *head) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i), + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, DISABLE)); + + return 0; +} + +static int +headca7d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const u32 olut_hi = upper_32_bits(asyh->olut.offset); + const u32 olut_lo = lower_32_bits(asyh->olut.offset); + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 6); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_OLUT(i), olut_hi, + + HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i), + NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ADDRESS_LO, olut_lo >> 4) | + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, TARGET, PHYSICAL_NVM) | + NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, ENABLE)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_OLUT_CONTROL(i), + NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) | + NVDEF(NVCA7D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) | + NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) | + NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size), + + HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff); + + return 0; +} + +static int +headca7d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + struct nv50_head_mode *m = &asyh->mode; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 11); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_RASTER_SIZE(i), + NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) | + NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active), + + HEAD_SET_RASTER_SYNC_END(i), + NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) | + NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce), + + HEAD_SET_RASTER_BLANK_END(i), + NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) | + NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke), + + HEAD_SET_RASTER_BLANK_START(i), + NVVAL(NVCA7D, 
HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) | + NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL(i), + NVDEF(NVCA7D, HEAD_SET_CONTROL, STRUCTURE, PROGRESSIVE)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i), + NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i), + NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000)); + + return 0; +} + +static int +headca7d_view(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = &head->disp->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 4); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN(i), + NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) | + NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH)); + + PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT(i), + NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) | + NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH)); + return 0; +} + +const struct nv50_head_func +headca7d = { + .view = headca7d_view, + .mode = headca7d_mode, + .olut = headc57d_olut, + .ilut_check = head907d_ilut_check, + .olut_identity = true, + .olut_size = 1024, + .olut_set = headca7d_olut_set, + .olut_clr = headca7d_olut_clr, + .curs_layout = head917d_curs_layout, + .curs_format = headc37d_curs_format, + .curs_set = headca7d_curs_set, + .curs_clr = headca7d_curs_clr, + .dither = headca7d_dither, + .procamp = headca7d_procamp, + .or = headca7d_or, + .static_wndw_map = headc37d_static_wndw_map, + .display_id = headca7d_display_id, +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c index 566fbddfc8d7..53c9ab6c138b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c @@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw) int version; int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *); } wimms[] = { + { GB202_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init }, { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init }, { TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init }, { GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init }, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index f6be426dd525..11d5b923d6e7 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -556,14 +556,24 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) return ret; if (wndw->ctxdma.parent) { - ctxdma = nv50_wndw_ctxdma_new(wndw, fb); - if (IS_ERR(ctxdma)) { - nouveau_bo_unpin(nvbo); - return PTR_ERR(ctxdma); + if (wndw->wndw.base.user.oclass < GB202_DISP_WINDOW_CHANNEL_DMA) { + ctxdma = nv50_wndw_ctxdma_new(wndw, fb); + if (IS_ERR(ctxdma)) { + nouveau_bo_unpin(nvbo); + return PTR_ERR(ctxdma); + } + + if (asyw->visible) + asyw->image.handle[0] = ctxdma->object.handle; + } else { + /* No CTXDMAs on Blackwell. */ + if (asyw->visible) { + /* "handle != NULL_HANDLE" is used to determine enable status + * in a number of places, so fill in a fake object handle. 
+ */ + asyw->image.handle[0] = NV50_DISP_HANDLE_WNDW_CTX(0); + } } - - if (asyw->visible) - asyw->image.handle[0] = ctxdma->object.handle; } ret = drm_gem_plane_helper_prepare_fb(plane, state); @@ -901,6 +911,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index, int (*new)(struct nouveau_drm *, enum drm_plane_type, int, s32, struct nv50_wndw **); } wndws[] = { + { GB202_DISP_WINDOW_CHANNEL_DMA, 0, wndwca7e_new }, { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new }, { TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new }, { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new }, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index 76a6ae5d5652..90d100514bef 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -134,6 +134,9 @@ int wndwc57e_csc_clr(struct nv50_wndw *); int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32, struct nv50_wndw **); +int wndwca7e_new(struct nouveau_drm *, enum drm_plane_type, int, s32, + struct nv50_wndw **); + int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index, struct nv50_wndw **); #endif diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c index 50a7b97d37a2..554c4f91f8be 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c @@ -25,6 +25,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c new file mode 100644 index 000000000000..0d8e9a9d1a57 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#include "wndw.h" +#include "atom.h" + +#include + +#include + +#include + +static int +wndwca7e_image_clr(struct nv50_wndw *wndw) +{ + struct nvif_push *push = &wndw->wndw.push; + int ret; + + ret = PUSH_WAIT(push, 4); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL, + NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) | + NVDEF(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING)); + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0), + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, DISABLE)); + + return 0; +} + +static int +wndwca7e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + const u32 iso0_hi = upper_32_bits(asyw->image.offset[0]); + const u32 iso0_lo = lower_32_bits(asyw->image.offset[0]); + struct nvif_push *push = &wndw->wndw.push; + int ret, kind; + + if (asyw->image.kind) + kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR; + else + kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH; + + ret = PUSH_WAIT(push, 17); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ISO(0), iso0_hi); + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0), + NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ADDRESS_LO, iso0_lo >> 4) | + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, TARGET, PHYSICAL_NVM) | + NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, KIND, kind) | + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, ENABLE)); + + PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL, + NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) | + NVVAL(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) | + NVDEF(NVCA7E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE)); + + PUSH_MTHD(push, NVCA7E, SET_SIZE, + NVVAL(NVCA7E, SET_SIZE, WIDTH, asyw->image.w) | + NVVAL(NVCA7E, SET_SIZE, HEIGHT, asyw->image.h), + + SET_STORAGE, + NVVAL(NVCA7E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh), + + SET_PARAMS, + NVVAL(NVCA7E, SET_PARAMS, FORMAT, asyw->image.format) | + NVDEF(NVCA7E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) | + NVDEF(NVCA7E, SET_PARAMS, SWAP_UV, DISABLE) | + NVDEF(NVCA7E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST), + + SET_PLANAR_STORAGE(0), + NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) | + NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6)); + + PUSH_MTHD(push, NVCA7E, SET_POINT_IN(0), + NVVAL(NVCA7E, SET_POINT_IN, X, asyw->state.src_x >> 16) | + NVVAL(NVCA7E, SET_POINT_IN, Y, asyw->state.src_y >> 16)); + + PUSH_MTHD(push, NVCA7E, SET_SIZE_IN, + NVVAL(NVCA7E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) | + NVVAL(NVCA7E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16)); + + PUSH_MTHD(push, NVCA7E, SET_SIZE_OUT, + NVVAL(NVCA7E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) | + NVVAL(NVCA7E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h)); + + return 0; +} + +static int +wndwca7e_ilut_clr(struct nv50_wndw *wndw) +{ + struct nvif_push *push = &wndw->wndw.push; + int ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, DISABLE)); + + return 0; +} + +static int +wndwca7e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + const u32 ilut_hi = upper_32_bits(asyw->xlut.i.offset); + const u32 ilut_lo = lower_32_bits(asyw->xlut.i.offset); + struct nvif_push *push = &wndw->wndw.push; + int ret; + + ret = PUSH_WAIT(push, 5); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ILUT, ilut_hi, + + 
SET_SURFACE_ADDRESS_LO_ILUT, + NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ADDRESS_LO, ilut_lo >> 4) | + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, TARGET, PHYSICAL_NVM) | + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, ENABLE)); + + PUSH_MTHD(push, NVCA7E, SET_ILUT_CONTROL, + NVVAL(NVCA7E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) | + NVVAL(NVCA7E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) | + NVVAL(NVCA7E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode)); + + return 0; +} + +static int +wndwca7e_ntfy_clr(struct nv50_wndw *wndw) +{ + struct nvif_push *push = &wndw->wndw.push; + int ret; + + ret = PUSH_WAIT(push, 2); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, DISABLE)); + + return 0; +} + +static int +wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) +{ + struct nv50_disp *disp = nv50_disp(wndw->plane.dev); + const u64 ntfy_addr = disp->sync->offset + asyw->ntfy.offset; + const u32 ntfy_hi = upper_32_bits(ntfy_addr); + const u32 ntfy_lo = lower_32_bits(ntfy_addr); + struct nvif_push *push = &wndw->wndw.push; + int ret; + + ret = PUSH_WAIT(push, 5); + if (ret) + return ret; + + PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi, + + SET_SURFACE_ADDRESS_LO_NOTIFIER, + NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) | + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) | + NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE)); + + PUSH_MTHD(push, NVCA7E, SET_NOTIFIER_CONTROL, + NVVAL(NVCA7E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken)); + + return 0; +} + +static const struct nv50_wndw_func +wndwca7e = { + .acquire = wndwc37e_acquire, + .release = wndwc37e_release, + .ntfy_set = wndwca7e_ntfy_set, + .ntfy_clr = wndwca7e_ntfy_clr, + .ntfy_reset = corec37d_ntfy_init, + .ntfy_wait_begun = base507c_ntfy_wait_begun, + .ilut = wndwc57e_ilut, + .ilut_identity = true, + .ilut_size = 1024, + .xlut_set = wndwca7e_ilut_set, + .xlut_clr = wndwca7e_ilut_clr, + .csc = base907c_csc, + .csc_set = wndwc57e_csc_set, + .csc_clr = wndwc57e_csc_clr, + .image_set = wndwca7e_image_set, + .image_clr = wndwca7e_image_clr, + .blend_set = wndwc37e_blend_set, + .update = wndwc37e_update, +}; + +int +wndwca7e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index, + s32 oclass, struct nv50_wndw **pwndw) +{ + return wndwc37e_new_(&wndwca7e, drm, type, index, oclass, BIT(index >> 1), pwndw); +} diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h new file mode 100644 index 000000000000..092aebe9551c --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. 
+ */ +#ifndef _clc97b_h_ +#define _clc97b_h_ + +// dma opcode instructions +#define NVC97B_DMA +#define NVC97B_DMA_OPCODE 31:29 +#define NVC97B_DMA_OPCODE_METHOD 0x00000000 +#define NVC97B_DMA_OPCODE_JUMP 0x00000001 +#define NVC97B_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC97B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC97B_DMA_METHOD_COUNT 27:18 +#define NVC97B_DMA_METHOD_OFFSET 15:2 +#define NVC97B_DMA_DATA 31:0 +#define NVC97B_DMA_DATA_NOP 0x00000000 +#define NVC97B_DMA_JUMP_OFFSET 15:2 +#define NVC97B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +#endif // _clc97b_h diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h new file mode 100644 index 000000000000..0fec6fc21d44 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h @@ -0,0 +1,868 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef _clca7d_h_ +#define _clca7d_h_ + +// class methods +#define NVCA7D_UPDATE (0x00000200) +#define NVCA7D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVCA7D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVCA7D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVCA7D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVCA7D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVCA7D_UPDATE_RELEASE_ELV 0:0 +#define NVCA7D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVCA7D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 
(0x0000001B) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCA7D_UPDATE_FORCE_FULLSCREEN 28:28 +#define NVCA7D_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000) +#define NVCA7D_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001) +#define NVCA7D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVCA7D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define 
NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define 
NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define 
NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260) +#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0 +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264) +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4 +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2 +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000) +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0 +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000) +#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001) + +#define NVCA7D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C) +#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) + +#define NVCA7D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define 
NVCA7D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVCA7D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVCA7D_WINDOW_SET_CONTROL_HIDE 8:8 +#define NVCA7D_WINDOW_SET_CONTROL_HIDE_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_CONTROL_HIDE_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS 9:9 +#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME 10:10 +#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define 
NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define 
NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define 
NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT 26:25 +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH_BLOCKLINEAR (0x00000000) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH (0x00000001) +#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_BLOCKLINEAR (0x00000002) +#define NVCA7D_WINDOW_SET_PHYSICAL(a) (0x00001014 + (a)*0x00000080) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW 31:0 +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_NONE (0x00000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW0 (0x00000001) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW1 (0x00000002) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW2 (0x00000004) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW3 (0x00000008) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW4 (0x00000010) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW5 (0x00000020) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW6 (0x00000040) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW7 (0x00000080) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW8 (0x00000100) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW9 (0x00000200) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW10 (0x00000400) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW11 (0x00000800) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW12 (0x00001000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW13 (0x00002000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW14 (0x00004000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW15 (0x00008000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW16 (0x00010000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW17 (0x00020000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW18 (0x00040000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW19 (0x00080000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW20 (0x00100000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW21 (0x00200000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW22 (0x00400000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW23 (0x00800000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW24 (0x01000000) +#define 
NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW25 (0x02000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW26 (0x04000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW27 (0x08000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW28 (0x10000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW29 (0x20000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW30 (0x40000000) +#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW31 (0x80000000) + +#define NVCA7D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4 +#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define 
NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVCA7D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER 3:3 +#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE 11:10 +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_NO_LOCK (0x00000000) 
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN 8:4 +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCKOUT_WINDOW 15:12 +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE 23:22 +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN 20:16 +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define 
NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define 
NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE 30:30 +#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE 31:31 +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800) +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005) +#define NVCA7D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVCA7D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004) +#define NVCA7D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) 
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4 +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED 5:5 +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_FALSE (0x00000000) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12 +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16 +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17 +#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800) +#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVCA7D_HEAD_SET_TILE_MASK(a) (0x00002060 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE 7:0 +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_NONE (0x00000000) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE0 (0x00000001) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE1 (0x00000002) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE2 (0x00000004) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE3 (0x00000008) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE4 (0x00000010) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE5 (0x00000020) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE6 (0x00000040) +#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE7 (0x00000080) +#define NVCA7D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH 15:0 +#define NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16 +#define NVCA7D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVCA7D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVCA7D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800) +#define NVCA7D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVCA7D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVCA7D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVCA7D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800) +#define 
NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20 +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002) +#define 
NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0 +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define 
NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND 4:4 +#define 
NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_DISABLE (0x00000000) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_ENABLE (0x00000001) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_LEVEL 25:20 +#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS 5:5 +#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_3BITS (0x00000000) +#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_4BITS (0x00000001) +#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800) +#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 + +#define NVCA7D_TILE_SET_TILE_SIZE(a) (0x00006000 + (a)*0x00000200) +#define NVCA7D_TILE_SET_TILE_SIZE_START 14:0 +#define NVCA7D_TILE_SET_TILE_SIZE_WIDTH 30:16 + +#endif // _clca7d_h diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h new file mode 100644 index 000000000000..ebfb2e48a4f4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. + */ +#ifndef _clca7e_h_ +#define _clca7e_h_ + +// class methods +#define NVCA7E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVCA7E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVCA7E_SET_SIZE (0x00000224) +#define NVCA7E_SET_SIZE_WIDTH 15:0 +#define NVCA7E_SET_SIZE_HEIGHT 31:16 +#define NVCA7E_SET_STORAGE (0x00000228) +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVCA7E_SET_PARAMS (0x0000022C) +#define NVCA7E_SET_PARAMS_FORMAT 7:0 +#define NVCA7E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVCA7E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVCA7E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVCA7E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVCA7E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVCA7E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVCA7E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVCA7E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVCA7E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVCA7E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVCA7E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVCA7E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVCA7E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVCA7E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVCA7E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define 
NVCA7E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVCA7E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVCA7E_SET_PARAMS_SWAP_UV 19:19 +#define NVCA7E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVCA7E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVCA7E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVCA7E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVCA7E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVCA7E_SET_POINT_IN_X 15:0 +#define NVCA7E_SET_POINT_IN_Y 31:16 +#define NVCA7E_SET_SIZE_IN (0x00000298) +#define NVCA7E_SET_SIZE_IN_WIDTH 15:0 +#define NVCA7E_SET_SIZE_IN_HEIGHT 31:16 +#define NVCA7E_SET_SIZE_OUT (0x000002A4) +#define NVCA7E_SET_SIZE_OUT_WIDTH 15:0 +#define NVCA7E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVCA7E_SET_PRESENT_CONTROL (0x00000308) +#define NVCA7E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVCA7E_SET_ILUT_CONTROL (0x00000440) +#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVCA7E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVCA7E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVCA7E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVCA7E_SET_ILUT_CONTROL_MODE 3:2 +#define NVCA7E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVCA7E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650) +#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001) +#define 
NVCA7E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004) +#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001) +#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688) +#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0 +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000) +#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001) + +#endif // _clca7e_h diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h new file mode 100644 index 000000000000..c8d6b6319134 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h @@ -0,0 +1,18 @@ +#ifndef __NVIF_PUSHC97B_H__ +#define __NVIF_PUSHC97B_H__ +#include <nvif/push.h> + +#include <nvhw/class/clc97b.h> + +#define PUSH_HDR(p,m,c) do { \ + PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC97B_DMA_METHOD_OFFSET)), "mthd"); \ + PUSH_ASSERT(!((c) & ~DRF_MASK(NVC97B_DMA_METHOD_COUNT)), "size"); \ + PUSH_DATA__((p), NVDEF(NVC97B, DMA, OPCODE, METHOD) | \ + NVVAL(NVC97B, DMA, METHOD_COUNT, (c)) | \ + NVVAL(NVC97B, DMA, METHOD_OFFSET, (m) >> 2), \ + " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \ +} while(0) + +#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c) +#define PUSH_MTHD_INC 4:4 +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 1b10c6c12f46..63621b1510f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1401,6 +1401,8 @@ nouveau_connector_create(struct drm_device *dev, int index) nv_connector->aux.drm_dev = dev; nv_connector->aux.transfer = nouveau_connector_aux_xfer; nv_connector->aux.name = connector->name; + if (disp->disp.object.oclass >= GB202_DISP) + nv_connector->aux.no_zero_sized = true; drm_dp_aux_init(&nv_connector->aux); break; default: diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c index 14da22fa3b5b..fa42146252da 100644 --- a/drivers/gpu/drm/nouveau/nvif/disp.c +++ b/drivers/gpu/drm/nouveau/nvif/disp.c @@ -36,6 +36,7 @@ int nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct
nvif_disp *disp) { static const struct nvif_mclass disps[] = { + { GB202_DISP, 0 }, { AD102_DISP, 0 }, { GA102_DISP, 0 }, { TU102_DISP, 0 }, -- cgit v1.2.3
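The class headers above only define method offsets and bit-field positions; drivers compose the values with nouveau's DRF helpers and submit them through the push-buffer macros that pushc97b.h wires up. As a rough illustration (not taken from the patch itself), a window-channel update using the new NVCA7E methods might look like the sketch below; the function name and the interval parameter are hypothetical, and the PUSH_WAIT/PUSH_MTHD/NVDEF/NVVAL helpers are assumed to behave as they do for the existing display classes.

/* Illustrative sketch only -- assumes the usual nvif push helpers. */
#include <nvif/pushc97b.h>
#include <nvhw/class/clca7e.h>

static int
wndwca7e_present_sketch(struct nvif_push *push, int interval)
{
	int ret;

	/* Reserve room for one method header plus one data word. */
	if ((ret = PUSH_WAIT(push, 2)))
		return ret;

	/* Non-tearing flip with the requested minimum present interval. */
	PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
		  NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, interval) |
		  NVDEF(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING) |
		  NVDEF(NVCA7E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
	return 0;
}

Because PUSH_MTHD_HDR in pushc97b.h drops the subchannel argument, the same PUSH_MTHD call pattern used for the older window classes should carry over unchanged to the GB20x channels.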