/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_dma.h"

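/*
 * NV10 and later cards report fence completion through a per-channel
 * reference counter that can be read back directly; earlier cards fall
 * back to chan->fence.last_sequence_irq, which (going by its name) is
 * presumably updated from the software-method interrupt path.  A fence
 * is a refcounted sequence number that sits on its channel's pending
 * list until nouveau_fence_update() sees it as completed.
 */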
#define USE_REFCNT (dev_priv->card_type >= NV_10)

struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;

	uint32_t sequence;
	bool signalled;
};

static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

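/* kref release handler: frees the fence once the last reference is dropped. */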
static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	kfree(fence);
}

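/*
 * Read back the most recently completed sequence number for the channel
 * and retire every pending fence up to and including it: mark each one
 * signalled, unlink it and drop the pending list's reference.
 */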
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	if (USE_REFCNT)
		sequence = nvchan_rd32(chan, 0x48);
	else
		sequence = atomic_read(&chan->fence.last_sequence_irq);

	if (chan->fence.sequence_ack == sequence)
		goto out;
	chan->fence.sequence_ack = sequence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);
		kref_put(&fence->refcount, nouveau_fence_del);

		if (sequence == chan->fence.sequence_ack)
			break;
	}
out:
	spin_unlock(&chan->fence.lock);
}

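/*
 * Allocate a fence on the given channel with a single reference held by
 * the caller.  If @emit is set the fence is also emitted immediately;
 * should emission fail, the fence is freed and *pfence is set to NULL.
 */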
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	fence->channel = chan;

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref((void *)&fence);
	*pfence = fence;
	return ret;
}

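/* Return the channel a fence was created on, or NULL for a NULL fence. */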
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? fence->channel : NULL;
}

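/*
 * Assign the fence the channel's next sequence number, put it on the
 * pending list (taking an extra reference on behalf of the list), and
 * write the sequence to the ring: method 0x0050 when the hardware
 * reference counter is used (NV10+), the 0x0150 software method
 * otherwise.  The BUG_ON guards against the 32-bit sequence counter
 * lapping the last acknowledged value.
 */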
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
	struct nouveau_channel *chan = fence->channel;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}

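/*
 * Drop a reference on a fence passed as an opaque sync-object pointer
 * and clear the caller's pointer; the void * signatures here and below
 * match the opaque sync-object interface these helpers are plugged
 * into elsewhere in the driver.
 */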
void
nouveau_fence_unref(void **sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(*sync_obj);

	if (fence)
		kref_put(&fence->refcount, nouveau_fence_del);
	*sync_obj = NULL;
}

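/* Take an additional reference on a fence passed as an opaque pointer. */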
void *
nouveau_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);

	kref_get(&fence->refcount);
	return sync_obj;
}

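/*
 * Check whether a fence has signalled, polling the channel for newly
 * completed sequence numbers if it has not been seen signalled yet.
 */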
bool
nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}

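/*
 * Wait up to three seconds for a fence to signal.  With @lazy the loop
 * sleeps a jiffy between polls, otherwise it busy-polls; with @intr a
 * pending signal aborts the wait with -ERESTARTSYS, and a timeout
 * returns -EBUSY.
 */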
int
nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	int ret = 0;

	while (1) {
		if (nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE
					 : TASK_UNINTERRUPTIBLE);
		if (lazy)
			schedule_timeout(1);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}

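/* Nothing to flush: fences are pushed to the ring as they are emitted. */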
int
nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

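/* Per-channel setup: empty pending list, fence lock, IRQ sequence counter. */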
int
nouveau_fence_init(struct nouveau_channel *chan)
{
	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);
	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}

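/*
 * Channel teardown: forcibly signal and release every fence still on
 * the pending list.
 */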
void
nouveau_fence_fini(struct nouveau_channel *chan)
{
	struct list_head *entry, *tmp;
	struct nouveau_fence *fence;

	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);

		fence->signalled = true;
		list_del(&fence->entry);
		kref_put(&fence->refcount, nouveau_fence_del);
	}
}