Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 1159
1 file changed, 1013 insertions(+), 146 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 3f20e49070ce..a46e13cc9ff1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -21,11 +21,11 @@
*
* Authors: Ben Skeggs
*/
-#include "nv50.h"
+#include "priv.h"
+#include "chan.h"
#include "head.h"
#include "ior.h"
-#include "channv50.h"
-#include "rootnv50.h"
+#include "outp.h"
#include <core/client.h>
#include <core/ramht.h>
@@ -34,150 +34,948 @@
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
+#include <subdev/i2c.h>
+#include <subdev/mmu.h>
#include <subdev/timer.h>
-static const struct nvkm_disp_oclass *
-nv50_disp_root_(struct nvkm_disp *base)
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static void
+nv50_pior_clock(struct nvkm_ior *pior)
{
- return nv50_disp(base)->func->root;
+ struct nvkm_device *device = pior->disp->engine.subdev.device;
+ const u32 poff = nv50_ior_base(pior);
+
+ nvkm_mask(device, 0x614380 + poff, 0x00000707, 0x00000001);
}
+static int
+nv50_pior_dp_links(struct nvkm_ior *pior, struct nvkm_i2c_aux *aux)
+{
+ int ret = nvkm_i2c_aux_lnk_ctl(aux, pior->dp.nr, pior->dp.bw, pior->dp.ef);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct nvkm_ior_func_dp
+nv50_pior_dp = {
+ .links = nv50_pior_dp_links,
+};
+
static void
-nv50_disp_intr_(struct nvkm_disp *base)
+nv50_pior_power_wait(struct nvkm_device *device, u32 poff)
{
- struct nv50_disp *disp = nv50_disp(base);
- disp->func->intr(disp);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61e004 + poff) & 0x80000000))
+ break;
+ );
}
static void
-nv50_disp_fini_(struct nvkm_disp *base)
+nv50_pior_power(struct nvkm_ior *pior, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
- struct nv50_disp *disp = nv50_disp(base);
- disp->func->fini(disp);
+ struct nvkm_device *device = pior->disp->engine.subdev.device;
+ const u32 poff = nv50_ior_base(pior);
+ const u32 shift = normal ? 0 : 16;
+ const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
+ const u32 field = 0x80000000 | (0x00000101 << shift);
+
+ nv50_pior_power_wait(device, poff);
+ nvkm_mask(device, 0x61e004 + poff, field, state);
+ nv50_pior_power_wait(device, poff);
}
-static int
-nv50_disp_init_(struct nvkm_disp *base)
+void
+nv50_pior_depth(struct nvkm_ior *ior, struct nvkm_ior_state *state, u32 ctrl)
{
- struct nv50_disp *disp = nv50_disp(base);
- return disp->func->init(disp);
+ /* GF119 moves this information to per-head methods, which is
+ * a lot more convenient, and where our shared code expects it.
+ */
+ if (state->head && state == &ior->asy) {
+ struct nvkm_head *head = nvkm_head_find(ior->disp, __ffs(state->head));
+
+ if (!WARN_ON(!head)) {
+ struct nvkm_head_state *state = &head->asy;
+ switch ((ctrl & 0x000f0000) >> 16) {
+ case 6: state->or.depth = 30; break;
+ case 5: state->or.depth = 24; break;
+ case 2: state->or.depth = 18; break;
+ case 0: state->or.depth = 18; break; /*XXX*/
+ default:
+ state->or.depth = 18;
+ WARN_ON(1);
+ break;
+ }
+ }
+ }
}
-static void *
-nv50_disp_dtor_(struct nvkm_disp *base)
+static void
+nv50_pior_state(struct nvkm_ior *pior, struct nvkm_ior_state *state)
{
- struct nv50_disp *disp = nv50_disp(base);
+ struct nvkm_device *device = pior->disp->engine.subdev.device;
+ const u32 coff = pior->id * 8 + (state == &pior->arm) * 4;
+ u32 ctrl = nvkm_rd32(device, 0x610b80 + coff);
+
+ state->proto_evo = (ctrl & 0x00000f00) >> 8;
+ state->rgdiv = 1;
+ switch (state->proto_evo) {
+ case 0: state->proto = TMDS; break;
+ default:
+ state->proto = UNKNOWN;
+ break;
+ }
- nvkm_ramht_del(&disp->ramht);
- nvkm_gpuobj_del(&disp->inst);
+ state->head = ctrl & 0x00000003;
+ nv50_pior_depth(pior, state, ctrl);
+}
- nvkm_event_fini(&disp->uevent);
- if (disp->wq)
- destroy_workqueue(disp->wq);
+static const struct nvkm_ior_func
+nv50_pior = {
+ .state = nv50_pior_state,
+ .power = nv50_pior_power,
+ .clock = nv50_pior_clock,
+ .dp = &nv50_pior_dp,
+};
- return disp;
+int
+nv50_pior_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&nv50_pior, disp, PIOR, id, false);
}
-static int
-nv50_disp_oneinit_(struct nvkm_disp *base)
+int
+nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
- struct nv50_disp *disp = nv50_disp(base);
- const struct nv50_disp_func *func = disp->func;
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- int ret, i;
+ struct nvkm_device *device = disp->engine.subdev.device;
- if (func->wndw.cnt) {
- disp->wndw.nr = func->wndw.cnt(&disp->base, &disp->wndw.mask);
- nvkm_debug(subdev, "Window(s): %d (%08lx)\n",
- disp->wndw.nr, disp->wndw.mask);
+ *pmask = (nvkm_rd32(device, 0x610184) & 0x70000000) >> 28;
+ return 3;
+}
+
+void
+nv50_sor_clock(struct nvkm_ior *sor)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const int div = sor->asy.link == 3;
+ const u32 soff = nv50_ior_base(sor);
+
+ nvkm_mask(device, 0x614300 + soff, 0x00000707, (div << 8) | div);
+}
+
+static void
+nv50_sor_power_wait(struct nvkm_device *device, u32 soff)
+{
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+ break;
+ );
+}
+
+void
+nv50_sor_power(struct nvkm_ior *sor, bool normal, bool pu, bool data, bool vsync, bool hsync)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 shift = normal ? 0 : 16;
+ const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
+ const u32 field = 0x80000000 | (0x00000001 << shift);
+
+ nv50_sor_power_wait(device, soff);
+ nvkm_mask(device, 0x61c004 + soff, field, state);
+ nv50_sor_power_wait(device, soff);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+}
+
+void
+nv50_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
+ u32 ctrl = nvkm_rd32(device, 0x610b70 + coff);
+
+ state->proto_evo = (ctrl & 0x00000f00) >> 8;
+ switch (state->proto_evo) {
+ case 0: state->proto = LVDS; state->link = 1; break;
+ case 1: state->proto = TMDS; state->link = 1; break;
+ case 2: state->proto = TMDS; state->link = 2; break;
+ case 5: state->proto = TMDS; state->link = 3; break;
+ default:
+ state->proto = UNKNOWN;
+ break;
}
- disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
- nvkm_debug(subdev, " Head(s): %d (%02lx)\n",
- disp->head.nr, disp->head.mask);
- for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
- ret = func->head.new(&disp->base, i);
- if (ret)
- return ret;
+ state->head = ctrl & 0x00000003;
+}
+
+static const struct nvkm_ior_func
+nv50_sor = {
+ .state = nv50_sor_state,
+ .power = nv50_sor_power,
+ .clock = nv50_sor_clock,
+};
+
+static int
+nv50_sor_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&nv50_sor, disp, SOR, id, false);
+}
+
+int
+nv50_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+
+ *pmask = (nvkm_rd32(device, 0x610184) & 0x03000000) >> 24;
+ return 2;
+}
+
+static void
+nv50_dac_clock(struct nvkm_ior *dac)
+{
+ struct nvkm_device *device = dac->disp->engine.subdev.device;
+ const u32 doff = nv50_ior_base(dac);
+
+ nvkm_mask(device, 0x614280 + doff, 0x07070707, 0x00000000);
+}
+
+int
+nv50_dac_sense(struct nvkm_ior *dac, u32 loadval)
+{
+ struct nvkm_device *device = dac->disp->engine.subdev.device;
+ const u32 doff = nv50_ior_base(dac);
+
+ dac->func->power(dac, false, true, false, false, false);
+
+ nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
+ mdelay(9);
+ udelay(500);
+ loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);
+
+ dac->func->power(dac, false, false, false, false, false);
+ if (!(loadval & 0x80000000))
+ return -ETIMEDOUT;
+
+ return (loadval & 0x38000000) >> 27;
+}
+
+static void
+nv50_dac_power_wait(struct nvkm_device *device, const u32 doff)
+{
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+ break;
+ );
+}
+
+void
+nv50_dac_power(struct nvkm_ior *dac, bool normal, bool pu, bool data, bool vsync, bool hsync)
+{
+ struct nvkm_device *device = dac->disp->engine.subdev.device;
+ const u32 doff = nv50_ior_base(dac);
+ const u32 shift = normal ? 0 : 16;
+ const u32 state = 0x80000000 | (0x00000040 * ! pu |
+ 0x00000010 * ! data |
+ 0x00000004 * ! vsync |
+ 0x00000001 * ! hsync) << shift;
+ const u32 field = 0xc0000000 | (0x00000055 << shift);
+
+ nv50_dac_power_wait(device, doff);
+ nvkm_mask(device, 0x61a004 + doff, field, state);
+ nv50_dac_power_wait(device, doff);
+}
+
+static void
+nv50_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
+{
+ struct nvkm_device *device = dac->disp->engine.subdev.device;
+ const u32 coff = dac->id * 8 + (state == &dac->arm) * 4;
+ u32 ctrl = nvkm_rd32(device, 0x610b58 + coff);
+
+ state->proto_evo = (ctrl & 0x00000f00) >> 8;
+ switch (state->proto_evo) {
+ case 0: state->proto = CRT; break;
+ default:
+ state->proto = UNKNOWN;
+ break;
}
- if (func->dac.cnt) {
- disp->dac.nr = func->dac.cnt(&disp->base, &disp->dac.mask);
- nvkm_debug(subdev, " DAC(s): %d (%02lx)\n",
- disp->dac.nr, disp->dac.mask);
- for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
- ret = func->dac.new(&disp->base, i);
- if (ret)
- return ret;
+ state->head = ctrl & 0x00000003;
+}
+
+static const struct nvkm_ior_func
+nv50_dac = {
+ .state = nv50_dac_state,
+ .power = nv50_dac_power,
+ .sense = nv50_dac_sense,
+ .clock = nv50_dac_clock,
+};
+
+int
+nv50_dac_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&nv50_dac, disp, DAC, id, false);
+}
+
+int
+nv50_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+
+ *pmask = (nvkm_rd32(device, 0x610184) & 0x00700000) >> 20;
+ return 3;
+}
+
+static void
+nv50_head_vblank_put(struct nvkm_head *head)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_mask(device, 0x61002c, (4 << head->id), 0);
+}
+
+static void
+nv50_head_vblank_get(struct nvkm_head *head)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_mask(device, 0x61002c, (4 << head->id), (4 << head->id));
+}
+
+static void
+nv50_head_rgclk(struct nvkm_head *head, int div)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+
+ nvkm_mask(device, 0x614200 + (head->id * 0x800), 0x0000000f, div);
+}
+
+void
+nv50_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+ const u32 hoff = head->id * 0x800;
+
+ /* vline read locks hline. */
+ *vline = nvkm_rd32(device, 0x616340 + hoff) & 0x0000ffff;
+ *hline = nvkm_rd32(device, 0x616344 + hoff) & 0x0000ffff;
+}
+
+static void
+nv50_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
+ struct nvkm_device *device = head->disp->engine.subdev.device;
+ const u32 hoff = head->id * 0x540 + (state == &head->arm) * 4;
+ u32 data;
+
+ data = nvkm_rd32(device, 0x610ae8 + hoff);
+ state->vblanke = (data & 0xffff0000) >> 16;
+ state->hblanke = (data & 0x0000ffff);
+ data = nvkm_rd32(device, 0x610af0 + hoff);
+ state->vblanks = (data & 0xffff0000) >> 16;
+ state->hblanks = (data & 0x0000ffff);
+ data = nvkm_rd32(device, 0x610af8 + hoff);
+ state->vtotal = (data & 0xffff0000) >> 16;
+ state->htotal = (data & 0x0000ffff);
+ data = nvkm_rd32(device, 0x610b00 + hoff);
+ state->vsynce = (data & 0xffff0000) >> 16;
+ state->hsynce = (data & 0x0000ffff);
+ state->hz = (nvkm_rd32(device, 0x610ad0 + hoff) & 0x003fffff) * 1000;
+}
+
+static const struct nvkm_head_func
+nv50_head = {
+ .state = nv50_head_state,
+ .rgpos = nv50_head_rgpos,
+ .rgclk = nv50_head_rgclk,
+ .vblank_get = nv50_head_vblank_get,
+ .vblank_put = nv50_head_vblank_put,
+};
+
+int
+nv50_head_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_head_new_(&nv50_head, disp, id);
+}
+
+int
+nv50_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ *pmask = 3;
+ return 2;
+}
+
+
+static void
+nv50_disp_mthd_list(struct nvkm_disp *disp, int debug, u32 base, int c,
+ const struct nvkm_disp_mthd_list *list, int inst)
+{
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int i;
+
+ for (i = 0; list->data[i].mthd; i++) {
+ if (list->data[i].addr) {
+ u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
+ u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
+ u32 mthd = list->data[i].mthd + (list->mthd * inst);
+ const char *name = list->data[i].name;
+ char mods[16];
+
+ if (prev != next)
+ snprintf(mods, sizeof(mods), "-> %08x", next);
+ else
+ snprintf(mods, sizeof(mods), "%13c", ' ');
+
+ nvkm_printk_(subdev, debug, info,
+ "\t%04x: %08x %s%s%s\n",
+ mthd, prev, mods, name ? " // " : "",
+ name ? name : "");
}
}
+}
- if (func->pior.cnt) {
- disp->pior.nr = func->pior.cnt(&disp->base, &disp->pior.mask);
- nvkm_debug(subdev, " PIOR(s): %d (%02lx)\n",
- disp->pior.nr, disp->pior.mask);
- for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
- ret = func->pior.new(&disp->base, i);
- if (ret)
- return ret;
+void
+nv50_disp_chan_mthd(struct nvkm_disp_chan *chan, int debug)
+{
+ struct nvkm_disp *disp = chan->disp;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ const struct nvkm_disp_chan_mthd *mthd = chan->mthd;
+ const struct nvkm_disp_mthd_list *list;
+ int i, j;
+
+ if (debug > subdev->debug)
+ return;
+ if (!mthd)
+ return;
+
+ for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
+ u32 base = chan->head * mthd->addr;
+ for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
+ const char *cname = mthd->name;
+ const char *sname = "";
+ char cname_[16], sname_[16];
+
+ if (mthd->addr) {
+ snprintf(cname_, sizeof(cname_), "%s %d",
+ mthd->name, chan->chid.user);
+ cname = cname_;
+ }
+
+ if (mthd->data[i].nr > 1) {
+ snprintf(sname_, sizeof(sname_), " - %s %d",
+ mthd->data[i].name, j);
+ sname = sname_;
+ }
+
+ nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
+ nv50_disp_mthd_list(disp, debug, base, mthd->prev,
+ list, j);
}
}
+}
- disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
- nvkm_debug(subdev, " SOR(s): %d (%02lx)\n",
- disp->sor.nr, disp->sor.mask);
- for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
- ret = func->sor.new(&disp->base, i);
- if (ret)
- return ret;
+static void
+nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
+ struct nvkm_device *device = disp->engine.subdev.device;
+ nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
+ nvkm_wr32(device, 0x610020, 0x00000001 << index);
+}
+
+static void
+nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+ struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
+ struct nvkm_device *device = disp->engine.subdev.device;
+ nvkm_wr32(device, 0x610020, 0x00000001 << index);
+ nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
+}
+
+void
+nv50_disp_chan_uevent_send(struct nvkm_disp *disp, int chid)
+{
+ nvkm_event_send(&disp->uevent, NVKM_DISP_EVENT_CHAN_AWAKEN, chid, NULL, 0);
+}
+
+const struct nvkm_event_func
+nv50_disp_chan_uevent = {
+ .init = nv50_disp_chan_uevent_init,
+ .fini = nv50_disp_chan_uevent_fini,
+};
+
+u64
+nv50_disp_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
+{
+ *psize = 0x1000;
+ return 0x640000 + (chan->chid.user * 0x1000);
+}
+
+void
+nv50_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
+{
+ struct nvkm_device *device = chan->disp->engine.subdev.device;
+ const u32 mask = 0x00010001 << chan->chid.user;
+ const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
+ nvkm_mask(device, 0x610028, mask, data);
+}
+
+static void
+nv50_disp_pioc_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_disp *disp = chan->disp;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
+
+ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
+}
- ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
- &disp->inst);
- if (ret)
- return ret;
+static int
+nv50_disp_pioc_init(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_disp *disp = chan->disp;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
- return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
- 0x1000, 0, disp->inst, &disp->ramht);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
+ if ((tmp & 0x00030000) == 0x00010000)
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
}
-static const struct nvkm_disp_func
-nv50_disp_ = {
- .dtor = nv50_disp_dtor_,
- .oneinit = nv50_disp_oneinit_,
- .init = nv50_disp_init_,
- .fini = nv50_disp_fini_,
- .intr = nv50_disp_intr_,
- .root = nv50_disp_root_,
+const struct nvkm_disp_chan_func
+nv50_disp_pioc_func = {
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .intr = nv50_disp_chan_intr,
+ .user = nv50_disp_chan_user,
};
int
-nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
+nv50_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
{
- struct nv50_disp *disp;
- int ret;
+ return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -10, handle,
+ chan->chid.user << 28 | chan->chid.user);
+}
- if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
- return -ENOMEM;
- disp->func = func;
- *pdisp = &disp->base;
+static void
+nv50_disp_dmac_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
+
+ /* deactivate channel */
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ }
- ret = nvkm_disp_ctor(&nv50_disp_, device, type, inst, &disp->base);
- if (ret)
- return ret;
+ chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
+}
+
+static int
+nv50_disp_dmac_init(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int ctrl = chan->chid.ctrl;
+ int user = chan->chid.user;
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
+ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
+ return -EBUSY;
+ }
- disp->wq = create_singlethread_workqueue("nvkm-disp");
- if (!disp->wq)
- return -ENOMEM;
+ return 0;
+}
- INIT_WORK(&disp->supervisor, func->super);
+int
+nv50_disp_dmac_push(struct nvkm_disp_chan *chan, u64 object)
+{
+ chan->memory = nvkm_umem_search(chan->object.client, object);
+ if (IS_ERR(chan->memory))
+ return PTR_ERR(chan->memory);
+
+ if (nvkm_memory_size(chan->memory) < 0x1000)
+ return -EINVAL;
+
+ switch (nvkm_memory_target(chan->memory)) {
+ case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
+ case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
+ case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
+ default:
+ return -EINVAL;
+ }
- return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
- &disp->uevent);
+ chan->push |= nvkm_memory_addr(chan->memory) >> 8;
+ return 0;
}
+const struct nvkm_disp_chan_func
+nv50_disp_dmac_func = {
+ .push = nv50_disp_dmac_push,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .intr = nv50_disp_chan_intr,
+ .user = nv50_disp_chan_user,
+ .bind = nv50_disp_dmac_bind,
+};
+
+const struct nvkm_disp_chan_user
+nv50_disp_curs = {
+ .func = &nv50_disp_pioc_func,
+ .ctrl = 7,
+ .user = 7,
+};
+
+const struct nvkm_disp_chan_user
+nv50_disp_oimm = {
+ .func = &nv50_disp_pioc_func,
+ .ctrl = 5,
+ .user = 5,
+};
+
+static const struct nvkm_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0009a0 },
+ { 0x0088, 0x0009c0 },
+ { 0x008c, 0x0009c8 },
+ { 0x0090, 0x6109b4 },
+ { 0x0094, 0x610970 },
+ { 0x00a0, 0x610998 },
+ { 0x00a4, 0x610964 },
+ { 0x00c0, 0x610958 },
+ { 0x00e0, 0x6109a8 },
+ { 0x00e4, 0x6109d0 },
+ { 0x00e8, 0x6109d8 },
+ { 0x0100, 0x61094c },
+ { 0x0104, 0x610984 },
+ { 0x0108, 0x61098c },
+ { 0x0800, 0x6109f8 },
+ { 0x0808, 0x610a08 },
+ { 0x080c, 0x610a10 },
+ { 0x0810, 0x610a00 },
+ {}
+ }
+};
+
+static const struct nvkm_disp_chan_mthd
+nv50_disp_ovly_mthd = {
+ .name = "Overlay",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_ovly_mthd_base },
+ {}
+ }
+};
+
+static const struct nvkm_disp_chan_user
+nv50_disp_ovly = {
+ .func = &nv50_disp_dmac_func,
+ .ctrl = 3,
+ .user = 3,
+ .mthd = &nv50_disp_ovly_mthd,
+};
+
+static const struct nvkm_disp_mthd_list
+nv50_disp_base_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x0008c4 },
+ { 0x0088, 0x0008d0 },
+ { 0x008c, 0x0008dc },
+ { 0x0090, 0x0008e4 },
+ { 0x0094, 0x610884 },
+ { 0x00a0, 0x6108a0 },
+ { 0x00a4, 0x610878 },
+ { 0x00c0, 0x61086c },
+ { 0x00e0, 0x610858 },
+ { 0x00e4, 0x610860 },
+ { 0x00e8, 0x6108ac },
+ { 0x00ec, 0x6108b4 },
+ { 0x0100, 0x610894 },
+ { 0x0110, 0x6108bc },
+ { 0x0114, 0x61088c },
+ {}
+ }
+};
+
+const struct nvkm_disp_mthd_list
+nv50_disp_base_mthd_image = {
+ .mthd = 0x0400,
+ .addr = 0x000000,
+ .data = {
+ { 0x0800, 0x6108f0 },
+ { 0x0804, 0x6108fc },
+ { 0x0808, 0x61090c },
+ { 0x080c, 0x610914 },
+ { 0x0810, 0x610904 },
+ {}
+ }
+};
+
+static const struct nvkm_disp_chan_mthd
+nv50_disp_base_mthd = {
+ .name = "Base",
+ .addr = 0x000540,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_base_mthd_base },
+ { "Image", 2, &nv50_disp_base_mthd_image },
+ {}
+ }
+};
+
+static const struct nvkm_disp_chan_user
+nv50_disp_base = {
+ .func = &nv50_disp_dmac_func,
+ .ctrl = 1,
+ .user = 1,
+ .mthd = &nv50_disp_base_mthd,
+};
+
+const struct nvkm_disp_mthd_list
+nv50_disp_core_mthd_base = {
+ .mthd = 0x0000,
+ .addr = 0x000000,
+ .data = {
+ { 0x0080, 0x000000 },
+ { 0x0084, 0x610bb8 },
+ { 0x0088, 0x610b9c },
+ { 0x008c, 0x000000 },
+ {}
+ }
+};
+
+static const struct nvkm_disp_mthd_list
+nv50_disp_core_mthd_dac = {
+ .mthd = 0x0080,
+ .addr = 0x000008,
+ .data = {
+ { 0x0400, 0x610b58 },
+ { 0x0404, 0x610bdc },
+ { 0x0420, 0x610828 },
+ {}
+ }
+};
+
+const struct nvkm_disp_mthd_list
+nv50_disp_core_mthd_sor = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0600, 0x610b70 },
+ {}
+ }
+};
+
+const struct nvkm_disp_mthd_list
+nv50_disp_core_mthd_pior = {
+ .mthd = 0x0040,
+ .addr = 0x000008,
+ .data = {
+ { 0x0700, 0x610b80 },
+ {}
+ }
+};
+
+static const struct nvkm_disp_mthd_list
+nv50_disp_core_mthd_head = {
+ .mthd = 0x0400,
+ .addr = 0x000540,
+ .data = {
+ { 0x0800, 0x610ad8 },
+ { 0x0804, 0x610ad0 },
+ { 0x0808, 0x610a48 },
+ { 0x080c, 0x610a78 },
+ { 0x0810, 0x610ac0 },
+ { 0x0814, 0x610af8 },
+ { 0x0818, 0x610b00 },
+ { 0x081c, 0x610ae8 },
+ { 0x0820, 0x610af0 },
+ { 0x0824, 0x610b08 },
+ { 0x0828, 0x610b10 },
+ { 0x082c, 0x610a68 },
+ { 0x0830, 0x610a60 },
+ { 0x0834, 0x000000 },
+ { 0x0838, 0x610a40 },
+ { 0x0840, 0x610a24 },
+ { 0x0844, 0x610a2c },
+ { 0x0848, 0x610aa8 },
+ { 0x084c, 0x610ab0 },
+ { 0x0860, 0x610a84 },
+ { 0x0864, 0x610a90 },
+ { 0x0868, 0x610b18 },
+ { 0x086c, 0x610b20 },
+ { 0x0870, 0x610ac8 },
+ { 0x0874, 0x610a38 },
+ { 0x0880, 0x610a58 },
+ { 0x0884, 0x610a9c },
+ { 0x08a0, 0x610a70 },
+ { 0x08a4, 0x610a50 },
+ { 0x08a8, 0x610ae0 },
+ { 0x08c0, 0x610b28 },
+ { 0x08c4, 0x610b30 },
+ { 0x08c8, 0x610b40 },
+ { 0x08d4, 0x610b38 },
+ { 0x08d8, 0x610b48 },
+ { 0x08dc, 0x610b50 },
+ { 0x0900, 0x610a18 },
+ { 0x0904, 0x610ab8 },
+ {}
+ }
+};
+
+static const struct nvkm_disp_chan_mthd
+nv50_disp_core_mthd = {
+ .name = "Core",
+ .addr = 0x000000,
+ .prev = 0x000004,
+ .data = {
+ { "Global", 1, &nv50_disp_core_mthd_base },
+ { "DAC", 3, &nv50_disp_core_mthd_dac },
+ { "SOR", 2, &nv50_disp_core_mthd_sor },
+ { "PIOR", 3, &nv50_disp_core_mthd_pior },
+ { "HEAD", 2, &nv50_disp_core_mthd_head },
+ {}
+ }
+};
+
+static void
+nv50_disp_core_fini(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* deactivate channel */
+ nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
+ nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core fini: %08x\n",
+ nvkm_rd32(device, 0x610200));
+ }
+
+ chan->suspend_put = nvkm_rd32(device, 0x640000);
+}
+
+static int
+nv50_disp_core_init(struct nvkm_disp_chan *chan)
+{
+ struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
+ nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
+ if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
+ nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x610204, chan->push);
+ nvkm_wr32(device, 0x610208, 0x00010000);
+ nvkm_wr32(device, 0x61020c, 0x00000000);
+ nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000, chan->suspend_put);
+ nvkm_wr32(device, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core init: %08x\n",
+ nvkm_rd32(device, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nvkm_disp_chan_func
+nv50_disp_core_func = {
+ .push = nv50_disp_dmac_push,
+ .init = nv50_disp_core_init,
+ .fini = nv50_disp_core_fini,
+ .intr = nv50_disp_chan_intr,
+ .user = nv50_disp_chan_user,
+ .bind = nv50_disp_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_user
+nv50_disp_core = {
+ .func = &nv50_disp_core_func,
+ .ctrl = 0,
+ .user = 0,
+ .mthd = &nv50_disp_core_mthd,
+};
+
static u32
nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -278,7 +1076,7 @@ static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head *head)
{
struct nvkm_ior *ior;
- list_for_each_entry(ior, &head->disp->ior, head) {
+ list_for_each_entry(ior, &head->disp->iors, head) {
if (ior->asy.head & (1 << head->id)) {
HEAD_DBG(head, "to %s", ior->name);
return ior;
@@ -292,7 +1090,7 @@ static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head *head)
{
struct nvkm_ior *ior;
- list_for_each_entry(ior, &head->disp->ior, head) {
+ list_for_each_entry(ior, &head->disp->iors, head) {
if (ior->arm.head & (1 << head->id)) {
HEAD_DBG(head, "on %s", ior->name);
return ior;
@@ -303,7 +1101,7 @@ nv50_disp_super_ior_arm(struct nvkm_head *head)
}
void
-nv50_disp_super_3_0(struct nv50_disp *disp, struct nvkm_head *head)
+nv50_disp_super_3_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_ior *ior;
@@ -346,7 +1144,7 @@ nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
do_div(v, khz);
v = v - ((36 / ior->dp.nr) + 3) - 1;
- ior->func->dp.audio_sym(ior, head->id, h, v);
+ ior->func->dp->audio_sym(ior, head->id, h, v);
/* watermark / activesym */
link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;
@@ -355,7 +1153,7 @@ nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
link_ratio = link_data_rate * symbol;
do_div(link_ratio, linkKBps);
- for (TU = 64; ior->func->dp.activesym && TU >= 32; TU--) {
+ for (TU = 64; ior->func->dp->activesym && TU >= 32; TU--) {
/* calculate average number of valid symbols in each TU */
u32 tu_valid = link_ratio * TU;
u32 calc, diff;
@@ -406,13 +1204,13 @@ nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
}
}
- if (ior->func->dp.activesym) {
+ if (ior->func->dp->activesym) {
if (!bestTU) {
nvkm_error(subdev, "unable to determine dp config\n");
return;
}
- ior->func->dp.activesym(ior, head->id, bestTU,
- bestVTUa, bestVTUf, bestVTUi);
+
+ ior->func->dp->activesym(ior, head->id, bestTU, bestVTUa, bestVTUf, bestVTUi);
} else {
bestTU = 64;
}
@@ -424,11 +1222,11 @@ nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
do_div(unk, symbol);
unk += 6;
- ior->func->dp.watermark(ior, head->id, unk);
+ ior->func->dp->watermark(ior, head->id, unk);
}
void
-nv50_disp_super_2_2(struct nv50_disp *disp, struct nvkm_head *head)
+nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head)
{
const u32 khz = head->asy.hz / 1000;
struct nvkm_outp *outp;
@@ -475,9 +1273,9 @@ nv50_disp_super_2_2(struct nv50_disp *disp, struct nvkm_head *head)
}
void
-nv50_disp_super_2_1(struct nv50_disp *disp, struct nvkm_head *head)
+nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head)
{
- struct nvkm_devinit *devinit = disp->base.engine.subdev.device->devinit;
+ struct nvkm_devinit *devinit = disp->engine.subdev.device->devinit;
const u32 khz = head->asy.hz / 1000;
HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
if (khz)
@@ -485,7 +1283,7 @@ nv50_disp_super_2_1(struct nv50_disp *disp, struct nvkm_head *head)
}
void
-nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
+nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_outp *outp;
struct nvkm_ior *ior;
@@ -509,7 +1307,7 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
}
void
-nv50_disp_super_1_0(struct nv50_disp *disp, struct nvkm_head *head)
+nv50_disp_super_1_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_ior *ior;
@@ -524,17 +1322,17 @@ nv50_disp_super_1_0(struct nv50_disp *disp, struct nvkm_head *head)
}
void
-nv50_disp_super_1(struct nv50_disp *disp)
+nv50_disp_super_1(struct nvkm_disp *disp)
{
struct nvkm_head *head;
struct nvkm_ior *ior;
- list_for_each_entry(head, &disp->base.head, head) {
+ list_for_each_entry(head, &disp->heads, head) {
head->func->state(head, &head->arm);
head->func->state(head, &head->asy);
}
- list_for_each_entry(ior, &disp->base.ior, head) {
+ list_for_each_entry(ior, &disp->iors, head) {
ior->func->state(ior, &ior->arm);
ior->func->state(ior, &ior->asy);
}
@@ -543,19 +1341,21 @@ nv50_disp_super_1(struct nv50_disp *disp)
void
nv50_disp_super(struct work_struct *work)
{
- struct nv50_disp *disp =
- container_of(work, struct nv50_disp, supervisor);
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
- u32 super = nvkm_rd32(device, 0x610030);
+ u32 super;
+
+ mutex_lock(&disp->super.mutex);
+ super = nvkm_rd32(device, 0x610030);
- nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
+ nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super.pending, super);
- if (disp->super & 0x00000010) {
+ if (disp->super.pending & 0x00000010) {
nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
nv50_disp_super_1(disp);
- list_for_each_entry(head, &disp->base.head, head) {
+ list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000020 << head->id)))
continue;
if (!(super & (0x00000080 << head->id)))
@@ -563,26 +1363,26 @@ nv50_disp_super(struct work_struct *work)
nv50_disp_super_1_0(disp, head);
}
} else
- if (disp->super & 0x00000020) {
- list_for_each_entry(head, &disp->base.head, head) {
+ if (disp->super.pending & 0x00000020) {
+ list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_2_0(disp, head);
}
- nvkm_outp_route(&disp->base);
- list_for_each_entry(head, &disp->base.head, head) {
+ nvkm_outp_route(disp);
+ list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000200 << head->id)))
continue;
nv50_disp_super_2_1(disp, head);
}
- list_for_each_entry(head, &disp->base.head, head) {
+ list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_2_2(disp, head);
}
} else
- if (disp->super & 0x00000040) {
- list_for_each_entry(head, &disp->base.head, head) {
+ if (disp->super.pending & 0x00000040) {
+ list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_3_0(disp, head);
@@ -590,6 +1390,7 @@ nv50_disp_super(struct work_struct *work)
}
nvkm_wr32(device, 0x610030, 0x80000000);
+ mutex_unlock(&disp->super.mutex);
}
const struct nvkm_enum
@@ -611,9 +1412,9 @@ nv50_disp_intr_error_code[] = {
};
static void
-nv50_disp_intr_error(struct nv50_disp *disp, int chid)
+nv50_disp_intr_error(struct nvkm_disp *disp, int chid)
{
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
@@ -645,9 +1446,9 @@ nv50_disp_intr_error(struct nv50_disp *disp, int chid)
}
void
-nv50_disp_intr(struct nv50_disp *disp)
+nv50_disp_intr(struct nvkm_disp *disp)
{
- struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_device *device = disp->engine.subdev.device;
u32 intr0 = nvkm_rd32(device, 0x610020);
u32 intr1 = nvkm_rd32(device, 0x610024);
@@ -664,35 +1465,35 @@ nv50_disp_intr(struct nv50_disp *disp)
}
if (intr1 & 0x00000004) {
- nvkm_disp_vblank(&disp->base, 0);
+ nvkm_disp_vblank(disp, 0);
nvkm_wr32(device, 0x610024, 0x00000004);
}
if (intr1 & 0x00000008) {
- nvkm_disp_vblank(&disp->base, 1);
+ nvkm_disp_vblank(disp, 1);
nvkm_wr32(device, 0x610024, 0x00000008);
}
if (intr1 & 0x00000070) {
- disp->super = (intr1 & 0x00000070);
- queue_work(disp->wq, &disp->supervisor);
- nvkm_wr32(device, 0x610024, disp->super);
+ disp->super.pending = (intr1 & 0x00000070);
+ queue_work(disp->super.wq, &disp->super.work);
+ nvkm_wr32(device, 0x610024, disp->super.pending);
}
}
void
-nv50_disp_fini(struct nv50_disp *disp)
+nv50_disp_fini(struct nvkm_disp *disp)
{
- struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_device *device = disp->engine.subdev.device;
/* disable all interrupts */
nvkm_wr32(device, 0x610024, 0x00000000);
nvkm_wr32(device, 0x610020, 0x00000000);
}
int
-nv50_disp_init(struct nv50_disp *disp)
+nv50_disp_init(struct nvkm_disp *disp)
{
- struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_head *head;
u32 tmp;
int i;
@@ -705,7 +1506,7 @@ nv50_disp_init(struct nv50_disp *disp)
nvkm_wr32(device, 0x610184, tmp);
/* ... CRTC caps */
- list_for_each_entry(head, &disp->base.head, head) {
+ list_for_each_entry(head, &disp->heads, head) {
tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
@@ -754,23 +1555,89 @@ nv50_disp_init(struct nv50_disp *disp)
return 0;
}
-static const struct nv50_disp_func
+int
+nv50_disp_oneinit(struct nvkm_disp *disp)
+{
+ const struct nvkm_disp_func *func = disp->func;
+ struct nvkm_subdev *subdev = &disp->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret, i;
+
+ if (func->wndw.cnt) {
+ disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask);
+ nvkm_debug(subdev, "Window(s): %d (%08lx)\n", disp->wndw.nr, disp->wndw.mask);
+ }
+
+ disp->head.nr = func->head.cnt(disp, &disp->head.mask);
+ nvkm_debug(subdev, " Head(s): %d (%02lx)\n", disp->head.nr, disp->head.mask);
+ for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
+ ret = func->head.new(disp, i);
+ if (ret)
+ return ret;
+ }
+
+ if (func->dac.cnt) {
+ disp->dac.nr = func->dac.cnt(disp, &disp->dac.mask);
+ nvkm_debug(subdev, " DAC(s): %d (%02lx)\n", disp->dac.nr, disp->dac.mask);
+ for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
+ ret = func->dac.new(disp, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (func->pior.cnt) {
+ disp->pior.nr = func->pior.cnt(disp, &disp->pior.mask);
+ nvkm_debug(subdev, " PIOR(s): %d (%02lx)\n", disp->pior.nr, disp->pior.mask);
+ for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
+ ret = func->pior.new(disp, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ disp->sor.nr = func->sor.cnt(disp, &disp->sor.mask);
+ nvkm_debug(subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
+ for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
+ ret = func->sor.new(disp, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
+ if (ret)
+ return ret;
+
+ return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
+ 0x1000, 0, disp->inst, &disp->ramht);
+}
+
+static const struct nvkm_disp_func
nv50_disp = {
+ .oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
- .uevent = &nv50_disp_chan_uevent,
.super = nv50_disp_super,
- .root = &nv50_disp_root_oclass,
+ .uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
+ .root = { 0, 0, NV50_DISP },
+ .user = {
+ {{0,0,NV50_DISP_CURSOR }, nvkm_disp_chan_new, &nv50_disp_curs },
+ {{0,0,NV50_DISP_OVERLAY }, nvkm_disp_chan_new, &nv50_disp_oimm },
+ {{0,0,NV50_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &nv50_disp_base },
+ {{0,0,NV50_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &nv50_disp_core },
+ {{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &nv50_disp_ovly },
+ {}
+ }
};
int
nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&nv50_disp, device, type, inst, pdisp);
+ return nvkm_disp_new_(&nv50_disp, device, type, inst, pdisp);
}