/* drivers/gpu/drm/radeon/r600_cs.c */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "avivod.h"

/*
 * Relocation handling is selected at runtime: the _mm variant resolves
 * relocations through the memory-managed (KMS) path, the _nomm variant
 * serves the legacy UMS path.  r600_cs_legacy_init() switches the
 * function pointer below to the _nomm variant.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
/* Signature shared by both reloc parsers so they can be swapped. */
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;

40 /**
41  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
42  * @parser:     parser structure holding parsing context.
43  * @pkt:        where to store packet informations
44  *
45  * Assume that chunk_ib_index is properly set. Will return -EINVAL
46  * if packet is bigger than remaining ib size. or if packets is unknown.
47  **/
48 int r600_cs_packet_parse(struct radeon_cs_parser *p,
49                         struct radeon_cs_packet *pkt,
50                         unsigned idx)
51 {
52         struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
53         uint32_t header;
54
55         if (idx >= ib_chunk->length_dw) {
56                 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
57                           idx, ib_chunk->length_dw);
58                 return -EINVAL;
59         }
60         header = ib_chunk->kdata[idx];
61         pkt->idx = idx;
62         pkt->type = CP_PACKET_GET_TYPE(header);
63         pkt->count = CP_PACKET_GET_COUNT(header);
64         pkt->one_reg_wr = 0;
65         switch (pkt->type) {
66         case PACKET_TYPE0:
67                 pkt->reg = CP_PACKET0_GET_REG(header);
68                 break;
69         case PACKET_TYPE3:
70                 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
71                 break;
72         case PACKET_TYPE2:
73                 pkt->count = -1;
74                 break;
75         default:
76                 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
77                 return -EINVAL;
78         }
79         if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
80                 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
81                           pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
82                 return -EINVAL;
83         }
84         return 0;
85 }
86
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @p:         parser structure holding parsing context.
 * @cs_reloc:  where to store the resolved relocation entry.
 *
 * Check next packet is relocation packet3 (a NOP carrying the relocation
 * index) and look the entry up in the relocation chunk.  Memory-managed
 * (KMS) variant: the bo has already been validated, so the pre-resolved
 * entry from relocs_ptr is returned directly.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	/* Advance past the reloc packet (header + payload dwords). */
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	/* First payload dword of the NOP is the dword offset into the
	 * relocation chunk. */
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
 * @p:         parser structure holding parsing context.
 * @cs_reloc:  where to store the resolved relocation entry.
 *
 * Check next packet is relocation packet3 (a NOP carrying the relocation
 * index) and compute the GPU offset directly from the raw relocation
 * chunk.  Legacy (non-KMS) variant: no bo validation is performed; the
 * single scratch entry in p->relocs is reused for every relocation.
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	/* Advance past the reloc packet (header + payload dwords). */
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	/* First payload dword of the NOP is the dword offset into the
	 * relocation chunk. */
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = &p->relocs[0];
	/* Assemble the 64-bit GPU address: dword idx+3 holds the high 32
	 * bits, dword idx+0 the low 32 bits of the relocation entry. */
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}

184 static int r600_packet0_check(struct radeon_cs_parser *p,
185                                 struct radeon_cs_packet *pkt,
186                                 unsigned idx, unsigned reg)
187 {
188         switch (reg) {
189         case AVIVO_D1MODE_VLINE_START_END:
190         case AVIVO_D2MODE_VLINE_START_END:
191                 break;
192         default:
193                 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
194                        reg, idx);
195                 return -EINVAL;
196         }
197         return 0;
198 }
199
200 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
201                                 struct radeon_cs_packet *pkt)
202 {
203         unsigned reg, i;
204         unsigned idx;
205         int r;
206
207         idx = pkt->idx + 1;
208         reg = pkt->reg;
209         for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
210                 r = r600_packet0_check(p, pkt, idx, reg);
211                 if (r) {
212                         return r;
213                 }
214         }
215         return 0;
216 }
217
218 static int r600_packet3_check(struct radeon_cs_parser *p,
219                                 struct radeon_cs_packet *pkt)
220 {
221         struct radeon_cs_chunk *ib_chunk;
222         struct radeon_cs_reloc *reloc;
223         volatile u32 *ib;
224         unsigned idx;
225         unsigned i;
226         unsigned start_reg, end_reg, reg;
227         int r;
228
229         ib = p->ib->ptr;
230         ib_chunk = &p->chunks[p->chunk_ib_idx];
231         idx = pkt->idx + 1;
232         switch (pkt->opcode) {
233         case PACKET3_START_3D_CMDBUF:
234                 if (p->family >= CHIP_RV770 || pkt->count) {
235                         DRM_ERROR("bad START_3D\n");
236                         return -EINVAL;
237                 }
238                 break;
239         case PACKET3_CONTEXT_CONTROL:
240                 if (pkt->count != 1) {
241                         DRM_ERROR("bad CONTEXT_CONTROL\n");
242                         return -EINVAL;
243                 }
244                 break;
245         case PACKET3_INDEX_TYPE:
246         case PACKET3_NUM_INSTANCES:
247                 if (pkt->count) {
248                         DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
249                         return -EINVAL;
250                 }
251                 break;
252         case PACKET3_DRAW_INDEX:
253                 if (pkt->count != 3) {
254                         DRM_ERROR("bad DRAW_INDEX\n");
255                         return -EINVAL;
256                 }
257                 r = r600_cs_packet_next_reloc(p, &reloc);
258                 if (r) {
259                         DRM_ERROR("bad DRAW_INDEX\n");
260                         return -EINVAL;
261                 }
262                 ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
263                 ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
264                 break;
265         case PACKET3_DRAW_INDEX_AUTO:
266                 if (pkt->count != 1) {
267                         DRM_ERROR("bad DRAW_INDEX_AUTO\n");
268                         return -EINVAL;
269                 }
270                 break;
271         case PACKET3_DRAW_INDEX_IMMD_BE:
272         case PACKET3_DRAW_INDEX_IMMD:
273                 if (pkt->count < 2) {
274                         DRM_ERROR("bad DRAW_INDEX_IMMD\n");
275                         return -EINVAL;
276                 }
277                 break;
278         case PACKET3_WAIT_REG_MEM:
279                 if (pkt->count != 5) {
280                         DRM_ERROR("bad WAIT_REG_MEM\n");
281                         return -EINVAL;
282                 }
283                 /* bit 4 is reg (0) or mem (1) */
284                 if (ib_chunk->kdata[idx+0] & 0x10) {
285                         r = r600_cs_packet_next_reloc(p, &reloc);
286                         if (r) {
287                                 DRM_ERROR("bad WAIT_REG_MEM\n");
288                                 return -EINVAL;
289                         }
290                         ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
291                         ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
292                 }
293                 break;
294         case PACKET3_SURFACE_SYNC:
295                 if (pkt->count != 3) {
296                         DRM_ERROR("bad SURFACE_SYNC\n");
297                         return -EINVAL;
298                 }
299                 /* 0xffffffff/0x0 is flush all cache flag */
300                 if (ib_chunk->kdata[idx+1] != 0xffffffff ||
301                     ib_chunk->kdata[idx+2] != 0) {
302                         r = r600_cs_packet_next_reloc(p, &reloc);
303                         if (r) {
304                                 DRM_ERROR("bad SURFACE_SYNC\n");
305                                 return -EINVAL;
306                         }
307                         ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
308                 }
309                 break;
310         case PACKET3_EVENT_WRITE:
311                 if (pkt->count != 2 && pkt->count != 0) {
312                         DRM_ERROR("bad EVENT_WRITE\n");
313                         return -EINVAL;
314                 }
315                 if (pkt->count) {
316                         r = r600_cs_packet_next_reloc(p, &reloc);
317                         if (r) {
318                                 DRM_ERROR("bad EVENT_WRITE\n");
319                                 return -EINVAL;
320                         }
321                         ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
322                         ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
323                 }
324                 break;
325         case PACKET3_EVENT_WRITE_EOP:
326                 if (pkt->count != 4) {
327                         DRM_ERROR("bad EVENT_WRITE_EOP\n");
328                         return -EINVAL;
329                 }
330                 r = r600_cs_packet_next_reloc(p, &reloc);
331                 if (r) {
332                         DRM_ERROR("bad EVENT_WRITE\n");
333                         return -EINVAL;
334                 }
335                 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
336                 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
337                 break;
338         case PACKET3_SET_CONFIG_REG:
339                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
340                 end_reg = 4 * pkt->count + start_reg - 4;
341                 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
342                     (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
343                     (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
344                         DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
345                         return -EINVAL;
346                 }
347                 for (i = 0; i < pkt->count; i++) {
348                         reg = start_reg + (4 * i);
349                         switch (reg) {
350                         case CP_COHER_BASE:
351                                 /* use PACKET3_SURFACE_SYNC */
352                                 return -EINVAL;
353                         default:
354                                 break;
355                         }
356                 }
357                 break;
358         case PACKET3_SET_CONTEXT_REG:
359                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
360                 end_reg = 4 * pkt->count + start_reg - 4;
361                 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
362                     (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
363                     (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
364                         DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
365                         return -EINVAL;
366                 }
367                 for (i = 0; i < pkt->count; i++) {
368                         reg = start_reg + (4 * i);
369                         switch (reg) {
370                         case DB_DEPTH_BASE:
371                         case CB_COLOR0_BASE:
372                         case CB_COLOR1_BASE:
373                         case CB_COLOR2_BASE:
374                         case CB_COLOR3_BASE:
375                         case CB_COLOR4_BASE:
376                         case CB_COLOR5_BASE:
377                         case CB_COLOR6_BASE:
378                         case CB_COLOR7_BASE:
379                         case SQ_PGM_START_FS:
380                         case SQ_PGM_START_ES:
381                         case SQ_PGM_START_VS:
382                         case SQ_PGM_START_GS:
383                         case SQ_PGM_START_PS:
384                                 r = r600_cs_packet_next_reloc(p, &reloc);
385                                 if (r) {
386                                         DRM_ERROR("bad SET_CONTEXT_REG "
387                                                         "0x%04X\n", reg);
388                                         return -EINVAL;
389                                 }
390                                 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
391                                 break;
392                         case VGT_DMA_BASE:
393                         case VGT_DMA_BASE_HI:
394                                 /* These should be handled by DRAW_INDEX packet 3 */
395                         case VGT_STRMOUT_BASE_OFFSET_0:
396                         case VGT_STRMOUT_BASE_OFFSET_1:
397                         case VGT_STRMOUT_BASE_OFFSET_2:
398                         case VGT_STRMOUT_BASE_OFFSET_3:
399                         case VGT_STRMOUT_BASE_OFFSET_HI_0:
400                         case VGT_STRMOUT_BASE_OFFSET_HI_1:
401                         case VGT_STRMOUT_BASE_OFFSET_HI_2:
402                         case VGT_STRMOUT_BASE_OFFSET_HI_3:
403                         case VGT_STRMOUT_BUFFER_BASE_0:
404                         case VGT_STRMOUT_BUFFER_BASE_1:
405                         case VGT_STRMOUT_BUFFER_BASE_2:
406                         case VGT_STRMOUT_BUFFER_BASE_3:
407                         case VGT_STRMOUT_BUFFER_OFFSET_0:
408                         case VGT_STRMOUT_BUFFER_OFFSET_1:
409                         case VGT_STRMOUT_BUFFER_OFFSET_2:
410                         case VGT_STRMOUT_BUFFER_OFFSET_3:
411                                 /* These should be handled by STRMOUT_BUFFER packet 3 */
412                                 DRM_ERROR("bad context reg: 0x%08x\n", reg);
413                                 return -EINVAL;
414                         default:
415                                 break;
416                         }
417                 }
418                 break;
419         case PACKET3_SET_RESOURCE:
420                 if (pkt->count % 7) {
421                         DRM_ERROR("bad SET_RESOURCE\n");
422                         return -EINVAL;
423                 }
424                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
425                 end_reg = 4 * pkt->count + start_reg - 4;
426                 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
427                     (start_reg >= PACKET3_SET_RESOURCE_END) ||
428                     (end_reg >= PACKET3_SET_RESOURCE_END)) {
429                         DRM_ERROR("bad SET_RESOURCE\n");
430                         return -EINVAL;
431                 }
432                 for (i = 0; i < (pkt->count / 7); i++) {
433                         switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
434                         case SQ_TEX_VTX_VALID_TEXTURE:
435                                 /* tex base */
436                                 r = r600_cs_packet_next_reloc(p, &reloc);
437                                 if (r) {
438                                         DRM_ERROR("bad SET_RESOURCE\n");
439                                         return -EINVAL;
440                                 }
441                                 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
442                                 /* tex mip base */
443                                 r = r600_cs_packet_next_reloc(p, &reloc);
444                                 if (r) {
445                                         DRM_ERROR("bad SET_RESOURCE\n");
446                                         return -EINVAL;
447                                 }
448                                 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
449                                 break;
450                         case SQ_TEX_VTX_VALID_BUFFER:
451                                 /* vtx base */
452                                 r = r600_cs_packet_next_reloc(p, &reloc);
453                                 if (r) {
454                                         DRM_ERROR("bad SET_RESOURCE\n");
455                                         return -EINVAL;
456                                 }
457                                 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
458                                 ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
459                                 break;
460                         case SQ_TEX_VTX_INVALID_TEXTURE:
461                         case SQ_TEX_VTX_INVALID_BUFFER:
462                         default:
463                                 DRM_ERROR("bad SET_RESOURCE\n");
464                                 return -EINVAL;
465                         }
466                 }
467                 break;
468         case PACKET3_SET_ALU_CONST:
469                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
470                 end_reg = 4 * pkt->count + start_reg - 4;
471                 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
472                     (start_reg >= PACKET3_SET_ALU_CONST_END) ||
473                     (end_reg >= PACKET3_SET_ALU_CONST_END)) {
474                         DRM_ERROR("bad SET_ALU_CONST\n");
475                         return -EINVAL;
476                 }
477                 break;
478         case PACKET3_SET_BOOL_CONST:
479                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
480                 end_reg = 4 * pkt->count + start_reg - 4;
481                 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
482                     (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
483                     (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
484                         DRM_ERROR("bad SET_BOOL_CONST\n");
485                         return -EINVAL;
486                 }
487                 break;
488         case PACKET3_SET_LOOP_CONST:
489                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
490                 end_reg = 4 * pkt->count + start_reg - 4;
491                 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
492                     (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
493                     (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
494                         DRM_ERROR("bad SET_LOOP_CONST\n");
495                         return -EINVAL;
496                 }
497                 break;
498         case PACKET3_SET_CTL_CONST:
499                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
500                 end_reg = 4 * pkt->count + start_reg - 4;
501                 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
502                     (start_reg >= PACKET3_SET_CTL_CONST_END) ||
503                     (end_reg >= PACKET3_SET_CTL_CONST_END)) {
504                         DRM_ERROR("bad SET_CTL_CONST\n");
505                         return -EINVAL;
506                 }
507                 break;
508         case PACKET3_SET_SAMPLER:
509                 if (pkt->count % 3) {
510                         DRM_ERROR("bad SET_SAMPLER\n");
511                         return -EINVAL;
512                 }
513                 start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
514                 end_reg = 4 * pkt->count + start_reg - 4;
515                 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
516                     (start_reg >= PACKET3_SET_SAMPLER_END) ||
517                     (end_reg >= PACKET3_SET_SAMPLER_END)) {
518                         DRM_ERROR("bad SET_SAMPLER\n");
519                         return -EINVAL;
520                 }
521                 break;
522         case PACKET3_SURFACE_BASE_UPDATE:
523                 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
524                         DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
525                         return -EINVAL;
526                 }
527                 if (pkt->count) {
528                         DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
529                         return -EINVAL;
530                 }
531                 break;
532         case PACKET3_NOP:
533                 break;
534         default:
535                 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
536                 return -EINVAL;
537         }
538         return 0;
539 }
540
/*
 * r600_cs_parse() - walk the whole IB chunk, validating every packet.
 *
 * Dispatches each packet to the matching type-specific checker and
 * advances p->idx past it.  Returns 0 on success or the first checker's
 * error code.  Assumes p->idx starts at the beginning of the IB and
 * that chunk_ib_idx is valid.
 */
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		/* Skip header + payload; for type-2 count is -1, so this
		 * advances exactly one dword. */
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			/* Type-2 packets are padding; nothing to check. */
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	/* Debug aid: dump the validated IB dword by dword (disabled). */
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}

578 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
579 {
580         if (p->chunk_relocs_idx == -1) {
581                 return 0;
582         }
583         p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
584         if (p->relocs == NULL) {
585                 return -ENOMEM;
586         }
587         return 0;
588 }
589
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number (currently unused here; kept for symmetry
 *              with the KMS parser fini, which unvalidates buffers on
 *              error)
 *
 * Free all memory owned by the parsing context: the scratch reloc
 * entry, every chunk's kdata copy, and the chunk arrays themselves.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

610 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
611                         unsigned family, u32 *ib, int *l)
612 {
613         struct radeon_cs_parser parser;
614         struct radeon_cs_chunk *ib_chunk;
615         struct radeon_ib        fake_ib;
616         int r;
617
618         /* initialize parser */
619         memset(&parser, 0, sizeof(struct radeon_cs_parser));
620         parser.filp = filp;
621         parser.rdev = NULL;
622         parser.family = family;
623         parser.ib = &fake_ib;
624         fake_ib.ptr = ib;
625         r = radeon_cs_parser_init(&parser, data);
626         if (r) {
627                 DRM_ERROR("Failed to initialize parser !\n");
628                 r600_cs_parser_fini(&parser, r);
629                 return r;
630         }
631         r = r600_cs_parser_relocs_legacy(&parser);
632         if (r) {
633                 DRM_ERROR("Failed to parse relocation !\n");
634                 r600_cs_parser_fini(&parser, r);
635                 return r;
636         }
637         /* Copy the packet into the IB, the parser will read from the
638          * input memory (cached) and write to the IB (which can be
639          * uncached). */
640         ib_chunk = &parser.chunks[parser.chunk_ib_idx];
641         parser.ib->length_dw = ib_chunk->length_dw;
642         memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
643         *l = parser.ib->length_dw;
644         r = r600_cs_parse(&parser);
645         if (r) {
646                 DRM_ERROR("Invalid command stream !\n");
647                 r600_cs_parser_fini(&parser, r);
648                 return r;
649         }
650         r600_cs_parser_fini(&parser, r);
651         return r;
652 }
653
654 void r600_cs_legacy_init(void)
655 {
656         r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
657 }