/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"

static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);

struct evergreen_cs_track {
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	/* value we track */
	u32 nsamples;
	u32 cb_color_base_last[12];
	struct radeon_bo *cb_color_bo[12];
	u32 cb_color_bo_offset[12];
	struct radeon_bo *cb_color_fmask_bo[8];
	struct radeon_bo *cb_color_cmask_bo[8];
	u32 cb_color_info[12];
	u32 cb_color_view[12];
	u32 cb_color_pitch_idx[12];
	u32 cb_color_slice_idx[12];
	u32 cb_color_dim_idx[12];
	u32 cb_color_dim[12];
	u32 cb_color_pitch[12];
	u32 cb_color_slice[12];
	u32 cb_color_cmask_slice[8];
	u32 cb_color_fmask_slice[8];
	u32 cb_target_mask;
	u32 cb_shader_mask;
	u32 vgt_strmout_config;
	u32 vgt_strmout_buffer_config;
	u32 db_depth_control;
	u32 db_depth_view;
	u32 db_depth_size;
	u32 db_depth_size_idx;
	u32 db_z_info;
	u32 db_z_idx;
	u32 db_z_read_offset;
	u32 db_z_write_offset;
	struct radeon_bo *db_z_read_bo;
	struct radeon_bo *db_z_write_bo;
	u32 db_s_info;
	u32 db_s_idx;
	u32 db_s_read_offset;
	u32 db_s_write_offset;
	struct radeon_bo *db_s_read_bo;
	struct radeon_bo *db_s_write_bo;
};

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
		track->cb_color_dim_idx[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
}

static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	/* XXX fill in */
	return 0;
}

static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;

	/* we don't support stream out buffer yet */
	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}

	/* XXX fill in */
	return 0;
}

/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 **/
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

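/*
 * For reference, the CP packet header dword decoded above is assumed (from
 * the CP_PACKET_GET_* macros) to be laid out roughly as:
 *
 *   bits [31:30]  packet type (0, 2 or 3)
 *   bits [29:16]  count (payload dwords following the header, minus one)
 *   bits [15:0]   type 0: starting register dword index (reg offset >> 2)
 *   bits [15:8]   type 3: opcode
 *
 * e.g. a PACKET3_NOP carrying one relocation index would be the two dwords
 * { (3 << 30) | (0 << 16) | (PACKET3_NOP << 8), reloc_idx }. This is an
 * illustrative sketch, not authoritative hardware documentation.
 */
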
/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

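/*
 * Illustrative IB layout for a relocated write, as this parser expects it
 * (a sketch of the userspace convention, not a separately documented ABI):
 *
 *   dw N+0: PACKET3 header (e.g. SET_CONTEXT_REG)
 *   dw N+1: register offset dword
 *   dw N+2: value to be patched with the bo's GPU address
 *   dw N+3: PACKET3_NOP header
 *   dw N+4: index into the relocation chunk (4 dwords per entry, per the
 *           FIXME above, hence the idx / 4 lookup)
 */
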
/**
 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:		parser structure holding parsing context.
 *
 * Check whether the next packet is a packet3 NOP, which is how userspace
 * passes relocations. Returns 1 if it is, 0 otherwise, without advancing
 * the parser index.
 **/
static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		r = -EINVAL;
		return r;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		r = -EINVAL;
		return r;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		r = -EINVAL;
		return r;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}

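/*
 * Sketch of the dword layout the function above assumes, with h_idx
 * pointing at the PACKET0 header of the VLINE_START_END write. This is
 * inferred from the offsets used in the code, not a separately
 * documented ABI:
 *
 *   h_idx + 0:  PACKET0 header for the VLINE_START_END write
 *   h_idx + 1:  VLINE_START_END value
 *   h_idx + 2:  PACKET3 WAIT_REG_MEM header (count = 5, so 6 payload dwords)
 *   h_idx + 4:  status register dword index, repointed to the real crtc
 *   h_idx + 9:  PACKET3 NOP header carrying the reloc
 *   h_idx + 10: crtc_id (read above as h_idx + 2 + 7 + 1)
 *
 * When the crtc is disabled, dwords h_idx + 2 .. h_idx + 8 (the whole
 * WAIT_REG_MEM packet) are overwritten with PACKET2(0) nops.
 */
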
static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe, this
 * function will test it against a list of registers needing special handling.
 */
static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;

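	/*
	 * The safe-register bitmap packs one bit per register dword:
	 * registers are 4-byte aligned, so reg >> 2 is the register's dword
	 * index, i = reg >> 7 selects the 32-bit bitmap word and
	 * (reg >> 2) & 31 the bit within it. From the check below, a clear
	 * bit is assumed to mean "safe" and a set bit "needs the special
	 * handling in the switch".
	 */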
	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(evergreen_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we will need to understand how it works better before we
	 * can perform security checks on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case DB_Z_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_info = radeon_get_ib_value(p, idx);
		ib[idx] &= ~Z_ARRAY_MODE(0xf);
		track->db_z_info &= ~Z_ARRAY_MODE(0xf);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case PA_SC_AA_CONFIG:
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case GDS_ADDR_BASE:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
						   struct radeon_bo *texture,
						   struct radeon_bo *mipmap)
{
	/* XXX fill in */
	return 0;
}

static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
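		/* payload dword 0 encodes the first register as
		 * (reg - PACKET3_SET_CONFIG_REG_START) >> 2 and the
		 * following pkt->count dwords are the register values,
		 * as implied by the bounds math and per-register checks
		 * below (same scheme for the other SET_* packets).
		 */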
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
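		/* Each resource descriptor spans 8 dwords. Inferred from the
		 * patching below (a sketch, not a documented layout): for
		 * textures, dword 1 holds the array mode bits and dwords 2/3
		 * the base and mip addresses in 256-byte units; for buffers,
		 * dword 0 is the base address, dword 1 the size and dword 2
		 * the upper address bits; dword 7 carries the SQ constant
		 * type tested via G__SQ_CONSTANT_TYPE().
		 */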
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p, idx+1+(i*8),
								     texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		track->npipes = p->rdev->config.evergreen.tiling_npipes;
		track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
		track->group_size = p->rdev->config.evergreen.tiling_group_size;
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}