/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004 Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"

/* Need a long timeout: shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);

static int
savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}

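/*
 * The masked status word gives the number of command FIFO/COB entries
 * currently in use.  The two register-based wait functions below poll it
 * until at most cob_size + SAVAGE_BCI_FIFO_SIZE - n entries are in use,
 * i.e. until at least n entries are guaranteed to be free.
 */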
static int
savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
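/*
 * A worked example of the wrap-around test used below: with the current
 * hardware tag at 0x0002 and e = 0xfffe, ((0x0002 - 0xfffe) & 0xffff) is
 * 0x0004 <= 0x7fff, so e is treated as already passed; with e = 0x0005 the
 * difference is 0xfffd > 0x7fff, so we keep waiting.
 */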
static int
savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[1];
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

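/*
 * Emit a new 16-bit event tag, optionally preceded by a 2D/3D wait command.
 * Callers combine the returned tag with dev_priv->event_wrap (for example
 * "count | (event_wrap << 16)" in savage_bci_event_emit below) so the tag
 * stays meaningful across hardware wrap-arounds.
 */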
uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++;	/* See the comment above savage_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t) count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

	return count;
}

/*
 * Freelist management
 */
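/*
 * The freelist is a doubly linked list with "head" and "tail" sentinels in
 * dev_priv.  savage_freelist_init() links every DMA buffer in after head,
 * savage_freelist_put() returns buffers at the head, and
 * savage_freelist_get() hands out the buffer in front of the tail sentinel
 * once the event it was aged with has been passed by the hardware.
 */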
static int savage_freelist_init(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_savage_buf_priv_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head.next = &dev_priv->tail;
	dev_priv->head.prev = NULL;
	dev_priv->head.buf = NULL;

	dev_priv->tail.next = NULL;
	dev_priv->tail.prev = &dev_priv->head;
	dev_priv->tail.buf = NULL;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		entry = buf->dev_private;

		SET_AGE(&entry->age, 0, 0);
		entry->buf = buf;

		entry->next = dev_priv->head.next;
		entry->prev = &dev_priv->head;
		dev_priv->head.next->prev = entry;
		dev_priv->head.next = entry;
	}

	return 0;
}

static struct drm_buf *savage_freelist_get(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	uint16_t event;
	unsigned int wrap;
	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}

void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

	if (entry->next != NULL || entry->prev != NULL) {
		DRM_ERROR("entry already on freelist.\n");
		return;
	}

	prev = &dev_priv->head;
	next = prev->next;
	prev->next = entry;
	next->prev = entry;
	entry->prev = prev;
	entry->next = next;
}

/*
 * Command DMA
 */
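/*
 * The command DMA buffer is managed as an array of SAVAGE_DMA_PAGE_SIZE
 * pages.  savage_dma_alloc() hands out space from the current page (or the
 * following pages), savage_dma_flush() points the hardware at the written
 * range with a BCI_DMA command and ages the pages with a new event, and
 * savage_dma_wait() blocks until a page's age event has been processed so
 * the page can safely be reused.
 */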
static int savage_dma_init(drm_savage_private_t *dev_priv)
{
	unsigned int i;

	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
		(SAVAGE_DMA_PAGE_SIZE * 4);
	dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
				      dev_priv->nr_dma_pages, GFP_KERNEL);
	if (dev_priv->dma_pages == NULL)
		return -ENOMEM;

	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, 0, 0);

	dev_priv->first_dma_page = 0;
	dev_priv->current_dma_page = 0;

	return 0;
}

void savage_dma_reset(drm_savage_private_t *dev_priv)
{
	uint16_t event;
	unsigned int wrap, i;
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}

uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
		dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
		SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

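	/* "rest" is the space left in the current page and "nr_pages" is
	 * the number of additional pages the request needs beyond it.  If
	 * the request still fits before the end of the DMA buffer we
	 * continue in place; otherwise everything pending is flushed and
	 * allocation restarts from page 0 (see below).
	 */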
	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
			cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}
	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}

static void savage_dma_flush(drm_savage_private_t *dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;
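	/* For example, with used = 5: pad = -5 & 1 = 1 extra entry brings
	 * the length to 6, and align = -(5 + 1) & 7 = 2 entries are added
	 * to dma_pages[cur].used after the flush so that the next block
	 * starts on an 8-entry boundary.
	 */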

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
			cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	DRM_MEMORYBARRIER();

	/* do flush ... */
	phys_addr = dev_priv->cmd_dma->offset +
		(first * SAVAGE_DMA_PAGE_SIZE +
		 dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
		dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		dev_priv->first_dma_page = cur;
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}

static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
			i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_savage_private_t *dev_priv;

	dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = (enum savage_family)chipset;

	return 0;
}


/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture are not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
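/*
 * Note on the Savage3D case below: the three write-combining ranges cover
 * fb_base + [0, 16MB), fb_base + [32MB, 64MB) and fb_base + [64MB, 128MB)
 * of the 128MB BAR.  The hole left between them presumably keeps the MMIO
 * window at fb_base + SAVAGE_FB_SIZE_S3 out of the write-combined ranges
 * (see the in-line comment about not making MMIO write-combining).
 */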
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = pci_resource_start(dev->pdev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
			dev_priv->mtrr[1].base = fb_base + 0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle =
			    drm_mtrr_add(dev_priv->mtrr[1].base,
					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
			dev_priv->mtrr[2].base = fb_base + 0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle =
			    drm_mtrr_add(dev_priv->mtrr[2].base,
					 dev_priv->mtrr[2].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 1));
		}
	} else {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = pci_resource_len(dev->pdev, 1);
		aper_rsrc = 2;
		aperture_base = pci_resource_start(dev->pdev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	if (ret)
		return ret;

	return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i)
		if (dev_priv->mtrr[i].handle >= 0)
			drm_mtrr_del(dev_priv->mtrr[i].handle,
				     dev_priv->mtrr[i].base,
				     dev_priv->mtrr[i].size, DRM_MTRR_WC);
}

int savage_driver_unload(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	kfree(dev_priv);

	return 0;
}

static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
		return -EINVAL;
	}
	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
		DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
		return -EINVAL;
	}
	if (init->dma_type != SAVAGE_DMA_AGP &&
	    init->dma_type != SAVAGE_DMA_PCI) {
		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
		return -EINVAL;
	}

	dev_priv->cob_size = init->cob_size;
	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
	dev_priv->dma_type = init->dma_type;

	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset;
	dev_priv->texture_size = init->texture_size;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		savage_do_cleanup_bci(dev);
		return -EINVAL;
	}
	if (init->status_offset != 0) {
		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("could not find shadow status region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->status = NULL;
	}
	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_core_findmap(dev,
						       init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("could not find DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to ioremap DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
	}
	if (init->agp_textures_offset) {
		dev_priv->agp_textures =
		    drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->agp_textures = NULL;
	}

	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return -EINVAL;
			}
			drm_core_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return -ENOMEM;
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}

	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
						    GFP_KERNEL);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	dev_priv->sarea_priv =
	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
		depth_stride =
		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}

	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
		    (volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	if (savage_dma_init(dev_priv) < 0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	return 0;
}

static int savage_do_cleanup_bci(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		kfree(dev_priv->fake_dma.handle);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		drm_core_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_core_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	kfree(dev_priv->dma_pages);

	return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	drm_savage_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t *event = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	event->count = savage_bci_emit_event(dev_priv, event->flags);
	event->count |= dev_priv->event_wrap << 16;

	return 0;
}

static int savage_bci_event_wait(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t *event = data;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--;	/* hardware hasn't passed the last wrap yet */

	event_e = event->count & 0xffff;
	event_w = event->count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

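/*
 * savage_bci_get_buffers() pulls buffers off the freelist and copies their
 * indices and sizes back into the caller's drm_dma request arrays, failing
 * with -EAGAIN once the freelist runs dry.  savage_bci_buffers() is the
 * ioctl wrapper that validates the request before handing out buffers.
 */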
static int savage_bci_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i],
				     &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i],
				     &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = savage_bci_get_buffers(dev, file_priv, d);
	}

	return ret;
}

void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	/*i830_flush_queue(dev); */

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	drm_core_reclaim_buffers(dev, file_priv);
}

struct drm_ioctl_desc savage_ioctls[] = {
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);