/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);

struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	/* values we track */
	u32 sq_config;
	u32 nsamples;
	u32 cb_color_base_last[8];
	struct radeon_bo *cb_color_bo[8];
	u32 cb_color_bo_offset[8];
	struct radeon_bo *cb_color_frag_bo[8];
	struct radeon_bo *cb_color_tile_bo[8];
	u32 cb_color_info[8];
	u32 cb_color_size_idx[8];
	u32 cb_target_mask;
	u32 cb_shader_mask;
	u32 cb_color_size[8];
	u32 vgt_strmout_en;
	u32 vgt_strmout_buffer_en;
	u32 db_depth_control;
	u32 db_depth_info;
	u32 db_depth_size_idx;
	u32 db_depth_view;
	u32 db_depth_size;
	u32 db_offset;
	struct radeon_bo *db_bo;
};

static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
	switch (format) {
	case V_038004_COLOR_8:
	case V_038004_COLOR_4_4:
	case V_038004_COLOR_3_3_2:
	case V_038004_FMT_1:
		*bpe = 1;
		break;
	case V_038004_COLOR_16:
	case V_038004_COLOR_16_FLOAT:
	case V_038004_COLOR_8_8:
	case V_038004_COLOR_5_6_5:
	case V_038004_COLOR_6_5_5:
	case V_038004_COLOR_1_5_5_5:
	case V_038004_COLOR_4_4_4_4:
	case V_038004_COLOR_5_5_5_1:
		*bpe = 2;
		break;
	case V_038004_FMT_8_8_8:
		*bpe = 3;
		break;
	case V_038004_COLOR_32:
	case V_038004_COLOR_32_FLOAT:
	case V_038004_COLOR_16_16:
	case V_038004_COLOR_16_16_FLOAT:
	case V_038004_COLOR_8_24:
	case V_038004_COLOR_8_24_FLOAT:
	case V_038004_COLOR_24_8:
	case V_038004_COLOR_24_8_FLOAT:
	case V_038004_COLOR_10_11_11:
	case V_038004_COLOR_10_11_11_FLOAT:
	case V_038004_COLOR_11_11_10:
	case V_038004_COLOR_11_11_10_FLOAT:
	case V_038004_COLOR_2_10_10_10:
	case V_038004_COLOR_8_8_8_8:
	case V_038004_COLOR_10_10_10_2:
	case V_038004_FMT_5_9_9_9_SHAREDEXP:
	case V_038004_FMT_32_AS_8:
	case V_038004_FMT_32_AS_8_8:
		*bpe = 4;
		break;
	case V_038004_COLOR_X24_8_32_FLOAT:
	case V_038004_COLOR_32_32:
	case V_038004_COLOR_32_32_FLOAT:
	case V_038004_COLOR_16_16_16_16:
	case V_038004_COLOR_16_16_16_16_FLOAT:
		*bpe = 8;
		break;
	case V_038004_FMT_16_16_16:
	case V_038004_FMT_16_16_16_FLOAT:
		*bpe = 6;
		break;
	case V_038004_FMT_32_32_32:
	case V_038004_FMT_32_32_32_FLOAT:
		*bpe = 12;
		break;
	case V_038004_COLOR_32_32_32_32:
	case V_038004_COLOR_32_32_32_32_FLOAT:
		*bpe = 16;
		break;
	case V_038004_FMT_GB_GR:
	case V_038004_FMT_BG_RG:
	case V_038004_COLOR_INVALID:
		*bpe = 16;
		return -EINVAL;
	}
	return 0;
}

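/*
 * Illustrative sketch (not part of the original driver): how a caller
 * typically combines r600_bpe_from_format() with a pitch and height to
 * bound a color buffer.  Per the switch above, V_038004_COLOR_8_8_8_8 is
 * a 4 bytes-per-element format, so:
 *
 *	u32 bpe, bytes;
 *	if (!r600_bpe_from_format(&bpe, V_038004_COLOR_8_8_8_8))
 *		bytes = pitch * height * bpe;	// 4 bytes per pixel here
 */
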
static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->db_bo = NULL;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
}

static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
	volatile u32 *ib = p->ib->ptr;

	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
		dev_warn(p->dev, "FMASK or CMASK buffers are not supported by this kernel\n");
		return -EINVAL;
	}
	size = radeon_bo_size(track->cb_color_bo[i]);
	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	if (!pitch) {
		dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
			 __func__, __LINE__, pitch, i, track->cb_color_size[i]);
		return -EINVAL;
	}
	height = size / (pitch * bpe);
	if (height > 8192)
		height = 8192;
	switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		if (pitch & 0x3f) {
			dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
				 __func__, __LINE__, pitch, bpe, pitch * bpe);
			return -EINVAL;
		}
		if ((pitch * bpe) & (track->group_size - 1)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		height &= ~0x7;
		if (!height)
			height = 8;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		if (pitch & ((8 * track->nbanks) - 1)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		tmp = pitch * 8 * bpe * track->nsamples;
		tmp = tmp / track->nbanks;
		if (tmp & (track->group_size - 1)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		height &= ~((16 * track->npipes) - 1);
		if (!height)
			height = 16 * track->npipes;
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	/* check offset */
	tmp = height * pitch;
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
		return -EINVAL;
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;
	return 0;
}

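/*
 * Worked example for the validation above (illustrative, assuming the
 * field macros from r600d.h): CB_COLOR[n]_SIZE packs the pitch in
 * 8-pixel tile units.  For a value with PITCH_TILE_MAX = 127 and
 * SLICE_TILE_MAX = 16383:
 *
 *	pitch          = (127 + 1) << 3;	// 1024 pixels
 *	slice_tile_max = 16383 + 1;		// 16384 8x8 tiles
 *
 * With bpe = 4 the linear-aligned check requires (1024 * 4) bytes per
 * row to be a multiple of the tiling group size, which holds for a
 * 256-byte group.
 */
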
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;
	volatile u32 *ib = p->ib->ptr;

	/* on legacy kernels we don't perform the advanced checks */
	if (p->rdev == NULL)
		return 0;
	/* we don't support streamout buffers yet */
	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}
	/* check that we have a cb for each enabled target; we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	tmp = track->cb_target_mask;
	for (i = 0; i < 8; i++) {
		if ((tmp >> (i * 4)) & 0xF) {
			/* at least one component is enabled */
			if (track->cb_color_bo[i] == NULL) {
				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
					 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				return -EINVAL;
			}
			/* perform rewrite of CB_COLOR[0-7]_SIZE */
			r = r600_cs_track_validate_cb(p, i);
			if (r)
				return r;
		}
	}
	/* Check depth buffer */
	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	    G_028800_Z_ENABLE(track->db_depth_control)) {
		u32 nviews, bpe, ntiles;
		if (track->db_bo == NULL) {
			dev_warn(p->dev, "z/stencil with no depth buffer\n");
			return -EINVAL;
		}
		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
			return -EINVAL;
		}
		switch (G_028010_FORMAT(track->db_depth_info)) {
		case V_028010_DEPTH_16:
			bpe = 2;
			break;
		case V_028010_DEPTH_X8_24:
		case V_028010_DEPTH_8_24:
		case V_028010_DEPTH_X8_24_FLOAT:
		case V_028010_DEPTH_8_24_FLOAT:
		case V_028010_DEPTH_32_FLOAT:
			bpe = 4;
			break;
		case V_028010_DEPTH_X24_8_32_FLOAT:
			bpe = 8;
			break;
		default:
			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			if (!track->db_depth_size_idx) {
				dev_warn(p->dev, "z/stencil buffer size not set\n");
				return -EINVAL;
			}
			printk_once(KERN_WARNING "You have old & broken userspace, please consider updating mesa\n");
			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
			tmp = (tmp / bpe) >> 6;
			if (!tmp) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
					 track->db_depth_size, bpe, track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
		} else {
			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
			tmp = ntiles * bpe * 64 * nviews;
			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
		}
	}
	return 0;
}

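/*
 * Illustrative sketch of the depth-buffer bound above (not driver code):
 * each tile covers 8x8 = 64 pixels, so for DEPTH_8_24 (bpe = 4),
 * SLICE_TILE_MAX = 16383 and a single view:
 *
 *	bytes = (16383 + 1) * 4 * 64 * 1;	// 4 MiB minimum bo size
 *
 * which must fit inside radeon_bo_size(track->db_bo) - track->db_offset.
 */
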
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index into the ib at which the packet starts
 *
 * Assumes that chunk_ib_index is properly set.  Returns -EINVAL if the
 * packet is bigger than the remaining ib size or if the packet is unknown.
 **/
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

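/*
 * Illustrative example (assuming the CP_PACKET* helpers from r600d.h):
 * a CP header encodes the packet type in the top two bits and the dword
 * count (minus one) in bits 29:16, so parsing
 *
 *	header = 0xC0012800;		// PACKET3(CONTEXT_CONTROL, 1)
 *	CP_PACKET_GET_TYPE(header);	// PACKET_TYPE3
 *	CP_PACKET_GET_COUNT(header);	// 1, i.e. two payload dwords
 *
 * lets the caller advance p->idx by pkt->count + 2 (header + payload).
 */
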
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3 and look up the
 * relocation entry it references in the relocation chunk.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3 and compute the
 * GPU offset from the relocation chunk data (non-memory-managed path).
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}

/**
 * r600_cs_packet_next_is_pkt3_nop() - test if the next packet is a packet3 nop for reloc
 * @p: parser structure holding parsing context.
 *
 * Returns 1 if the next packet is a relocation packet3 (NOP), 0 otherwise.
 **/
static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		r = -EINVAL;
		return r;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		r = -EINVAL;
		return r;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		r = -EINVAL;
		return r;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
out:
	return r;
}

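/*
 * Illustrative layout of the userspace VLINE sequence handled above (a
 * sketch of what the parser expects, not additional driver code):
 *
 *	h_idx + 0:  PACKET0(AVIVO_D1MODE_VLINE_START_END, 0)
 *	h_idx + 1:  start/end value
 *	h_idx + 2:  PACKET3(WAIT_REG_MEM, 5)
 *	h_idx + 3..8:  info, reg >> 2, addr_hi, ref, mask, poll interval
 *	h_idx + 9:  PACKET3(NOP, 0)
 *	h_idx + 10: crtc_id reloc
 *
 * h_idx points back at the PACKET0 header, so ib[h_idx] and
 * ib[h_idx + 4] can be patched when the wait targets CRTC 1, and
 * ib[h_idx + 2..8] can be nop'ed out for a disabled CRTC.
 */
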
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if a register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe.  If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we will need to better understand how it works to
	 * perform a security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case R_028010_DB_DEPTH_INFO:
		track->db_depth_info = radeon_get_ib_value(p, idx);
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->nsamples = 1 << tmp;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		break;
	/* These registers were added late; there is userspace which does
	 * provide relocations for them but sets a 0 offset.  In order to
	 * avoid breaking old userspace we detect this and set the address
	 * to point to the last CB_COLOR0_BASE.  Note that if userspace
	 * doesn't set CB_COLOR0_BASE before these registers we will report
	 * an error.  Old userspace always set CB_COLOR0_BASE before any of
	 * these.
	 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			printk_once(KERN_WARNING "You have old & broken userspace, "
				    "please consider updating mesa & xf86-video-ati\n");
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_frag_bo[tmp] = reloc->robj;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			printk_once(KERN_WARNING "You have old & broken userspace, "
				    "please consider updating mesa & xf86-video-ati\n");
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_tile_bo[tmp] = reloc->robj;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		break;
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

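/*
 * Worked example of the safe-register bitmap lookup above (illustrative):
 * registers are dword aligned, so bit (reg >> 2) of the bitmap selects
 * the register, 32 registers per bitmap word.  For
 * R_028800_DB_DEPTH_CONTROL (0x028800):
 *
 *	i = 0x028800 >> 7;			// word 0x510 of r600_reg_safe_bm
 *	m = 1 << ((0x028800 >> 2) & 31);	// bit 0 of that word
 *
 * A set bit means "not blindly safe": the register falls through to the
 * switch above for special handling instead of returning 0 immediately.
 */
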
static inline unsigned minify(unsigned size, unsigned levels)
{
	size = size >> levels;
	if (size < 1)
		size = 1;
	return size;
}

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
			      unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level, face;
	unsigned width, height, depth, rowstride, size;

	w0 = minify(w0, 0);
	h0 = minify(h0, 0);
	d0 = minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = minify(w0, i);
		height = minify(h0, i);
		depth = minify(d0, i);
		for (face = 0; face < nfaces; face++) {
			rowstride = ((width * bpe) + 255) & ~255;
			size = height * rowstride * depth;
			offset += size;
			offset = (offset + 0x1f) & ~0x1f;
		}
	}
	*l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
	*mipmap_size = offset;
	if (!blevel)
		*mipmap_size -= *l0_size;
	if (!nlevels)
		*mipmap_size = *l0_size;
}

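/*
 * Worked example (illustrative): a 2D, 256x256, bpe = 4 texture with
 * nfaces = 1, blevel = 0 and nlevels = 2.  Rows are padded to 256 bytes,
 * so:
 *
 *	level 0: 256*4 = 1024 bytes/row * 256 rows = 262144 bytes
 *	level 1: 128*4 =  512 bytes/row * 128 rows =  65536 bytes
 *
 * l0_size = 262144 and, because blevel is 0, mipmap_size excludes the
 * base level, leaving 65536 bytes required of the mipmap bo.
 */
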
/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap)
{
	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
	u32 word0, word1, l0_size, mipmap_size;

	/* on legacy kernels we don't perform the advanced checks */
	if (p->rdev == NULL)
		return 0;
	word0 = radeon_get_ib_value(p, idx + 0);
	word1 = radeon_get_ib_value(p, idx + 1);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	nfaces = 1;
	switch (G_038000_DIM(word0)) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_MSAA:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
		return -EINVAL;
	}
	word0 = radeon_get_ib_value(p, idx + 4);
	word1 = radeon_get_ib_value(p, idx + 5);
	blevel = G_038010_BASE_LEVEL(word0);
	nlevels = G_038014_LAST_LEVEL(word1);
	r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	word0 = radeon_get_ib_value(p, idx + 2);
	if ((l0_size + word0) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
			 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	word0 = radeon_get_ib_value(p, idx + 3);
	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
		dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
			 w0, h0, bpe, blevel, nlevels, word0, mipmap_size,
			 radeon_bo_size(mipmap));
		return -EINVAL;
	}
	return 0;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

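/*
 * Illustrative example of the SET_*_REG range checks above (a sketch,
 * assuming the offsets from r600d.h): for PACKET3_SET_CONTEXT_REG the
 * first payload dword holds (reg - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2,
 * so writing DB_DEPTH_CONTROL (0x028800) with one value gives
 *
 *	idx_value = (0x028800 - 0x028000) >> 2;	// 0x200
 *	start_reg = (0x200 << 2) + 0x028000;	// back to 0x028800
 *	end_reg   = 4 * 1 + start_reg - 4;	// == start_reg for count 1
 *
 * and each register in [start_reg, end_reg] is then vetted by
 * r600_cs_check_reg().
 */
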
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set then invalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	parser.track = track;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB; the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}