/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>

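/* TTM "destroy" callback: drop any kernel mapping, warn if the buffer is
 * somehow still attached to a GEM object, release its tiling region (if
 * any), then unlink the nouveau_bo from the device list and free it. */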
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}

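/* Round the requested size and alignment up to satisfy the chipset's
 * tiling constraints: on NV50 the block size is derived from the amount
 * of VRAM, on earlier chips the requirements depend on the chipset
 * generation and tile_mode. */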
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size.  Align the size to the
	 * appropriate boundaries.  This does imply that sizes are rounded up
	 * by 3-7 pages, so be aware of this and do not waste memory by
	 * allocating many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}

	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

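/* Allocate and initialise a nouveau_bo, fix up its size/alignment for
 * tiling, hand it to TTM for backing-store management, and track it on
 * the device-wide bo list. */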
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}

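/* Build the TTM placement arrays from a mask of TTM_PL_FLAG_* domains.
 * "busy" adds extra domains that are acceptable only when the preferred
 * ones are contended. */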
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);
}

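/* Pinning is reference counted: only the first pin actually validates the
 * buffer into the requested memory type and accounts the aperture usage,
 * and only the last unpin releases it. */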
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}

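/* Indexed 16/32-bit accessors for a kmap'd buffer.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory, in which case the
 * io{read,write}*_native() helpers must be used instead of plain
 * loads and stores. */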
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

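/* Create the TTM backend used to bind buffers into the GART: AGP where
 * the build supports it and the GART is driven by the AGP bridge,
 * otherwise the SGDMA page-table path. */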
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

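/* Describe each TTM memory pool to the core: SYSTEM is plain cached RAM,
 * VRAM appears to be the framebuffer aperture (PCI resource 1), clamped
 * to the actual amount of VRAM, and TT goes through whichever GART
 * backend is in use. */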
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

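/* On eviction, prefer kicking VRAM buffers out to the GART (falling back
 * to system memory) rather than straight to system RAM; everything else
 * evicts to system memory. */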
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	if (nvbo->channel && nvbo->channel != chan)
		ret = nouveau_fence_wait(fence, NULL, false, false);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

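/* Pick the DMA object handle a channel should use to address the buffer's
 * current backing store: the device channel has its own NvDma* objects,
 * user channels use their per-channel VRAM/GART handles. */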
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

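/* Copy a buffer with the M2MF engine, PAGE_SIZE bytes per line and at
 * most 2047 lines per pass.  When the copy is submitted on a channel
 * other than the device channel, the offsets are presumably rebased
 * into that channel's VM via vm_gart_base/vm_vram_base. */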
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1 << 8) | (1 << 0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

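/* The M2MF engine can only see VRAM and GART, so moves to or from plain
 * system memory bounce through a temporary GART placement: "flipd"
 * copies VRAM->TT->SYSTEM, "flips" goes SYSTEM->TT->VRAM. */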
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

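/* Set up tiling state for a buffer entering VRAM: NV50 binds it linearly
 * into the VRAM VM, NV10 and later claim a tiling region matching the
 * buffer's tile_mode; the cleanup counterpart expires the old region
 * once the move's fence has passed. */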
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}

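/* Top-level TTM move callback: fall back to a CPU memcpy before the card
 * is initialised, treat moves of unpopulated system buffers as a simple
 * struct copy, and otherwise try the M2MF paths before resorting to a
 * software copy. */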
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};