/* drivers/gpu/drm/nouveau/nouveau_sgdma.c */

#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

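/* Per-object TTM backend state: the bus addresses of the currently
 * populated pages, and where in the sg ctxdma they were bound. */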
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};

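/* Populate: map the TTM pages for PCI DMA and remember their bus
 * addresses; the mappings are torn down again in the clear() hook. */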
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

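/* Clear: unbind if necessary, then unmap and free the page list. */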
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

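/* Convert a byte offset into the GART aperture to a PTE index within the
 * sg ctxdma.  Pre-NV50 ctxdmas hold one 32-bit PTE per 4KiB page behind a
 * two-word header, hence the "+ 2"; the NV50 page table uses two 32-bit
 * words per PTE, hence the shift. */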
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

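/* Bind: write the previously mapped bus addresses into the PTEs covering
 * the GART offset of this memory region. */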
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		/* Flush the VM after touching the page tables. */
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
				 nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}

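/* Unbind: point every PTE this object occupied back at the dummy page,
 * so the GPU never walks an unmapped entry. */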
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	nvbe->bound = false;
	return 0;
}

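/* Destroy: release any remaining mappings, then free the backend. */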
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

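/* TTM backend method table for the SGDMA backend. */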
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate = nouveau_sgdma_populate,
	.clear    = nouveau_sgdma_clear,
	.bind     = nouveau_sgdma_bind,
	.unbind   = nouveau_sgdma_unbind,
	.destroy  = nouveau_sgdma_destroy
};

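/* Allocate a TTM backend instance; fails if the sg ctxdma has not been
 * set up by nouveau_sgdma_init() yet. */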
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;
	nvbe->backend.func = &nouveau_sgdma_backend;

	return &nvbe->backend;
}

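/* Create the ctxdma/page table backing the GART aperture and point every
 * entry at the dummy page.  Pre-NV50 uses a 64MiB aperture with one
 * 32-bit PTE per 4KiB page plus an 8-byte ctxdma header; NV50 uses a
 * 512MiB aperture backed by a single VM page table with 8-byte PTEs. */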
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ALLOW_NO_REFS |
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE?  NVIDIA does this,
		 * and it's confirmed to work on c51.  Perhaps that means
		 * NV_DMA_TARGET_PCIE on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				(1 << 12) /* PT present */ |
				(0 << 13) /* PT *not* linear */ |
				(NV_DMA_ACCESS_RW << 14) |
				(NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

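/* Tear down the dummy page and the sg ctxdma created at init time. */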
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* Unmap with the same size the page was mapped with. */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

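/* Look up the bus address a GART offset currently points at by reading
 * the PTE back out of the ctxdma (pre-NV50 only). */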
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		instmem->prepare_access(dev, false);
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		instmem->finish_access(dev);
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}