/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

static int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");

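/*
 * Illustrative note: the ring is sized as a power of two (default 2^8 = 256
 * entries) so that the head/tail/issued indices can wrap with simple mask
 * arithmetic; the ioat2_ring_*() helpers in dma_v2.h rely on this invariant.
 */
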
static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
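
/*
 * Ring bookkeeping, in brief (illustrative summary; the authoritative
 * helpers are ioat2_ring_pending/active/space() in dma_v2.h):
 *
 *	tail <= issued <= head		(modulo ring size)
 *	pending = head - issued		descriptors prepped, not yet started
 *	active  = head - tail		descriptors the hardware may still own
 *
 * Writing dmacount, as above, tells the engine how many more descriptors
 * in the chain it may consume.
 */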

static void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Set pending to '1' unless pending is already set to '2'; pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * Called with ring_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}

static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}
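
/*
 * A null descriptor moves no data; it is used here to (re)prime the channel:
 * programming the chain address and issuing a single null operation with
 * int_en and compl_write set gives the engine a known starting descriptor
 * and forces a completion writeback that software can observe.  (This is a
 * reading of the code above, not a hardware specification statement.)
 */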

static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
	}
}
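
/*
 * Reclaim note (derived from the loop above): descriptors from tail up to
 * and including the one whose ->txd.phys matches the completion writeback
 * address are retired; their cookies are recorded, dma mappings undone and
 * client callbacks run before the tail pointer advances past them.
 */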

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat2+ channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
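
/*
 * Both locks above are taken with trylock: cleanup is opportunistic and will
 * run again from the tasklet or the timer, so on contention it is cheaper to
 * bail out than to spin (a reading of the code, stated here for orientation).
 */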

static void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void __restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__restart_chan(ioat);
}

static void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors, check for channel programming
		 * errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
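
/*
 * Watchdog flow (as implemented above): IOAT_COMPLETION_PENDING is set when
 * work is submitted and cleared once the ring drains.  Each timer firing
 * either reaps progress via __cleanup(), or arms IOAT_COMPLETION_ACK; if a
 * second period then passes with no progress, the channel is forcibly
 * restarted.
 */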

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat2_timer_event,
				  ioat2_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}

static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}
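
/*
 * Note: dma_cookie_t is a signed 32-bit type in the dmaengine API, with
 * negative values reserved for error status; the "if (cookie < 0)" check
 * above therefore restarts numbering at 1 on wraparound.
 */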

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kfree(desc);
}

/**
 * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
static int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u32 chanerr;
	int descs;
	int i;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat->alloc_order = ioat_get_alloc_order();
	descs = 1 << ioat->alloc_order;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return -ENOMEM;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return descs;
}
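
/*
 * The hw->next links set up above form a fixed circular chain: descriptor i
 * points at descriptor i+1 and the last wraps back to the first.  The
 * hardware simply follows this chain; software controls how far it may
 * advance by writing dmacount (see __ioat2_issue_pending()), while
 * head/tail/issued track positions within the same ring.
 */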

/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	if (unlikely(ioat2_ring_space(ioat) < num_descs)) {
		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* progress reclaim in the allocation failure case; we may be
		 * called under bh_disabled so we need to trigger the timer
		 * event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (jiffies > chan->timer.expires &&
		    timer_pending(&chan->timer)) {
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			ioat2_timer_event((unsigned long) ioat);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0; /* with ioat->ring_lock held */
}
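
/*
 * On success ioat2_alloc_and_lock() deliberately returns with ring_lock
 * held; the matching unlock happens in ioat2_tx_submit_unlock() once the
 * caller has filled in the reserved descriptors, keeping submission in
 * ring order.
 */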

static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}
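
/*
 * Worked example (illustrative): with xfercap_log = 20 (1MB max per
 * descriptor), a 2.5MB memcpy becomes num_descs = 3 ring entries of
 * 1MB + 1MB + 0.5MB.  Only the last descriptor gets compl_write (and
 * int_en when DMA_PREP_INTERRUPT is set), so one completion writeback
 * covers the whole transfer.
 */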

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat2_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
}

static enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat2_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}

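/*
 * Example client usage (illustrative sketch, not part of this driver):
 * once ioat2_dma_probe() has registered the operations set above, a
 * dmaengine client drives them roughly as:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);               (ioat2_tx_submit_unlock)
 *	chan->device->device_issue_pending(chan); (ioat2_issue_pending)
 *
 * and then polls device_is_tx_complete() for DMA_SUCCESS.
 */
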
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}