path: root/drivers/gpu/drm/nouveau/nv20_fb.c
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

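/* Carve a block out of the on-die compression tag heap.  drm_mm_pre_get()
 * pre-allocates the allocator's bookkeeping up front, so the block itself
 * can be grabbed atomically while the tile spinlock is held.  Returns NULL
 * if no tag memory is available.
 */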
static struct drm_mm_node *
nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct drm_mm_node *mem;
	int ret;

	ret = drm_mm_pre_get(&pfb->tag_heap);
	if (ret)
		return NULL;

	spin_lock(&dev_priv->tile.lock);
	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
	if (mem)
		mem = drm_mm_get_block_atomic(mem, size, 0);
	spin_unlock(&dev_priv->tile.lock);

	return mem;
}

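/* Return a tag memory block to the heap and clear the caller's pointer.
 * Safe to call when no block was ever allocated (*pmem == NULL).
 */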
static void
nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_mm_node *mem = *pmem;
	if (mem) {
		spin_lock(&dev_priv->tile.lock);
		drm_mm_put_block(mem);
		spin_unlock(&dev_priv->tile.lock);
		*pmem = NULL;
	}
}

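/* Fill in the software state for tile region 'i': base address, limit and
 * pitch, plus (for depth buffers) a chunk of compression tag memory.  The
 * hardware registers aren't touched here; nv20_fb_set_tile_region() does
 * that.
 */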
void
nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
			 uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);

	tile->addr  = 0x00000001 | addr;
	tile->limit = max(1u, addr + size) - 1;
	tile->pitch = pitch;

	/* Allocate some of the on-die tag memory, used to store Z
	 * compression meta-data (most likely just a bitmap determining
	 * if a given tile is compressed or not).
	 */
	if (flags & NOUVEAU_GEM_TILE_ZETA) {
		tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
		if (tile->tag_mem) {
			/* Enable Z compression */
			tile->zcomp = tile->tag_mem->start;
			if (dev_priv->chipset >= 0x25) {
				if (bpp == 16)
					tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
				else
					tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
			} else {
				tile->zcomp |= NV20_PFB_ZCOMP_EN;
				if (bpp != 16)
					tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
			}
		}

		tile->addr |= 2;
	}
}

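/* Clear the software state for tile region 'i' and release any compression
 * tag memory it was using.
 */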
void
nv20_fb_free_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
	nv20_fb_free_tag(dev, &tile->tag_mem);
}

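/* Program the cached state of tile region 'i' into the PFB tile registers,
 * including the NV20-specific Z compression register.
 */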
void
nv20_fb_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
	nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}

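/* Determine the amount and type of VRAM fitted: the top byte of register
 * 0x10020c (PFB block) gives the memory size, and bits 8:9 of PBUS
 * register 0x001218 encode the RAM type.
 */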
int
nv20_fb_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 mem_size = nv_rd32(dev, 0x10020c);
	u32 pbus1218 = nv_rd32(dev, 0x001218);

	dev_priv->vram_size = mem_size & 0xff000000;
	switch (pbus1218 & 0x00000300) {
	case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
	case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
	case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
	case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
	}

	return 0;
}

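/* Set up the compression tag heap (64KiB of tag memory on NV25 and later,
 * 32KiB on earlier NV20-family chips) and reset the tiling setup.
 */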
int
nv20_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	if (dev_priv->chipset >= 0x25)
		drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
	else
		drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);

	/* Turn all the tiling regions off. */
	pfb->num_tiles = NV10_PFB_TILE__SIZE;
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	return 0;
}

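/* Release the tag memory held by each tiling region and tear down the
 * tag heap allocator.
 */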
void
nv20_fb_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	for (i = 0; i < pfb->num_tiles; i++)
		pfb->free_tile_region(dev, i);

	drm_mm_takedown(&pfb->tag_heap);
}