/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"

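/* Allocate a VRAM buffer object to back a PRAMIN-resident gpuobj, pin it,
 * and record the physical start address of the backing pages. */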
int
nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
		      uint32_t *size)
{
	int ret;

	*size = ALIGN(*size, 4096);
	if (*size == 0)
		return -EINVAL;

	ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
			     true, false, &gpuobj->im_backing);
	if (ret) {
		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
		return ret;
	}

	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
		nouveau_bo_ref(NULL, &gpuobj->im_backing);
		return ret;
	}

	gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
	gpuobj->im_backing_start <<= PAGE_SHIFT;
	return 0;
}

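/* Tear down a gpuobj's PRAMIN backing: unbind it if still bound, then
 * unpin and release the backing buffer object. */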
void
nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (gpuobj && gpuobj->im_backing) {
		if (gpuobj->im_bound)
			dev_priv->engine.instmem.unbind(dev, gpuobj);
		nouveau_bo_unpin(gpuobj->im_backing);
		nouveau_bo_ref(NULL, &gpuobj->im_backing);
		gpuobj->im_backing = NULL;
	}
}

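/* Write page table entries mapping the gpuobj's backing VRAM pages into the
 * PRAMIN aperture, then flush so the new mapping becomes visible. */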
int
nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t pte, pte_end;
	uint64_t vram;

	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);

	pte     = gpuobj->im_pramin->start >> 12;
	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
	vram    = gpuobj->im_backing_start;

	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
		 gpuobj->im_pramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);

	while (pte < pte_end) {
		nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
		nv_wr32(dev, 0x702004 + (pte * 8), 0);
		vram += 4096;
		pte++;
	}
	dev_priv->engine.instmem.flush(dev);

	if (1) {
		u32 chan = nv_rd32(dev, 0x1700) << 16;
		nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000005);
	}

	gpuobj->im_bound = 1;
	return 0;
}

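/* Clear the gpuobj's page table entries from the PRAMIN aperture and flush. */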
int
nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t pte, pte_end;

	if (gpuobj->im_bound == 0)
		return -EINVAL;

	pte     = gpuobj->im_pramin->start >> 12;
	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
	while (pte < pte_end) {
		nv_wr32(dev, 0x702000 + (pte * 8), 0);
		nv_wr32(dev, 0x702004 + (pte * 8), 0);
		pte++;
	}
	dev_priv->engine.instmem.flush(dev);

	gpuobj->im_bound = 0;
	return 0;
}

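/* Kick off a PRAMIN flush and wait for the hardware to report completion. */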
void
nvc0_instmem_flush(struct drm_device *dev)
{
	nv_wr32(dev, 0x070000, 1);
	if (!nv_wait(0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
}

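/* Save a copy of the 64KiB PRAMIN window to system memory across suspend. */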
int
nvc0_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf;
	int i;

	dev_priv->susres.ramin_copy = vmalloc(65536);
	if (!dev_priv->susres.ramin_copy)
		return -ENOMEM;
	buf = dev_priv->susres.ramin_copy;

	for (i = 0; i < 65536; i += 4)
		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
	return 0;
}

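/* Restore the saved PRAMIN contents and re-point BAR3 at the reserved
 * channel after resume. */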
void
nvc0_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf = dev_priv->susres.ramin_copy;
	u64 chan;
	int i;

	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	nv_wr32(dev, 0x001700, chan >> 16);

	for (i = 0; i < 65536; i += 4)
		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
	vfree(dev_priv->susres.ramin_copy);
	dev_priv->susres.ramin_copy = NULL;

	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
}

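/* Reserve VRAM at the top of memory for the PRAMIN channel, build its page
 * tables, point BAR3 at it and set up the global PRAMIN heap. */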
int
nvc0_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
	int ret, i;

	dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	imem = 4096 + 4096 + 32768;

	nv_wr32(dev, 0x001700, chan >> 16);

	/* channel setup */
	nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700208, lower_32_bits(lim3));
	nv_wr32(dev, 0x70020c, upper_32_bits(lim3));

	/* point pgd -> pgt */
	nv_wr32(dev, 0x701000, 0);
	nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);

	/* point pgt -> physical vram for channel */
	pgt3 = 0x2000;
	for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* clear rest of pgt */
	for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, 0);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* point bar3 at the channel */
	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));

	/* Global PRAMIN heap */
	ret = drm_mm_init(&dev_priv->ramin_heap, imem,
			  dev_priv->ramin_size - imem);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
		return -ENOMEM;
	}

	/*XXX: incorrect, but needed to make hash func "work" */
	dev_priv->ramht_offset = 0x10000;
	dev_priv->ramht_bits   = 9;
	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
	return 0;
}

void
nvc0_instmem_takedown(struct drm_device *dev)
{
}