/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
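
/*
 * Illustrative note (not in the upstream file): each set bit in ATTNSTATUS
 * names a channel with pending work, so e.g. attnstatus == 0x5 schedules the
 * cleanup tasklets of channels 0 and 2; writing intrctrl back acknowledges
 * the device-level interrupt.
 */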
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
static void ioat1_cleanup_tasklet(unsigned long data);

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device,
		       struct ioat_chan_common *chan, int idx,
		       work_func_t work_fn, void (*tasklet)(unsigned long),
		       unsigned long tasklet_data)
{
	struct dma_device *dma = &device->common;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	INIT_DELAYED_WORK(&chan->work, work_fn);
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	tasklet_init(&chan->cleanup_task, tasklet, tasklet_data);
	tasklet_disable(&chan->cleanup_task);
}
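
/*
 * Illustrative note (not in the upstream file): the "0x80 * (idx + 1)" above
 * reflects the MMIO layout - the device-wide registers occupy the first 0x80
 * bytes of the BAR and each channel then gets its own 0x80-byte bank, so
 * channel 2, for instance, starts at reg_base + 0x180.
 */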
static void ioat1_reset_part2(struct work_struct *work);
/**
 * ioat1_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat1_reset_part2,
				  ioat1_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
/**
 * ioat1_reset_part2 - reinit the channel after a reset
 */
static void ioat1_reset_part2(struct work_struct *work)
{
	struct ioat_chan_common *chan;
	struct ioat_dma_chan *ioat;
	struct ioat_desc_sw *desc;
	int dmacount;
	bool start_null = false;

	chan = container_of(work, struct ioat_chan_common, work.work);
	ioat = container_of(chan, struct ioat_dma_chan, base);
	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->desc_lock);

	chan->completion_virt->low = 0;
	chan->completion_virt->high = 0;
	ioat->pending = 0;

	/* count the descriptors waiting */
	dmacount = 0;
	if (ioat->used_desc.prev) {
		desc = to_ioat_desc(ioat->used_desc.prev);
		do {
			dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat->used_desc.next);
	}

	if (dmacount) {
		/*
		 * write the new starting descriptor address
		 * this puts channel engine into ARMED state
		 */
		desc = to_ioat_desc(ioat->used_desc.prev);
		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->txd.phys) >> 32,
		       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, chan->reg_base
			+ IOAT_CHANCMD_OFFSET(chan->device->version));
	} else
		start_null = true;
	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	dev_err(to_dev(chan),
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(chan), dmacount, ioat->desccount);

	if (start_null)
		ioat1_dma_start_null_desc(ioat);
}
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	if (!ioat->used_desc.prev)
		return;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = (chan->completion_virt->low
		   & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */
	spin_lock_bh(&ioat->desc_lock);
	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&chan->work, RESET_DELAY);
}
/**
 * ioat1_chan_watchdog - watch for stuck channels
 */
static void ioat1_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat;
	struct ioat_chan_common *chan;
	int i;
	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		chan = ioat_chan_by_index(device, i);
		ioat = container_of(chan, struct ioat_dma_chan, base);

		if (/* have we started processing anything yet */
		    chan->last_completion
		    /* have we completed any since last watchdog cycle? */
		    && (chan->last_completion == chan->watchdog_completion)
		    /* has TCP stuck on one cookie since last watchdog? */
		    && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie)
		    && (chan->watchdog_tcp_cookie != chan->completed_cookie)
		    /* is there something in the chain to be processed? */
		    /* CB1 chain always has at least the last one processed */
		    && (ioat->used_desc.prev != ioat->used_desc.next)
		    && ioat->pending == 0) {

			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * it is not zero
			 * and it has changed since the last watchdog
			 *     we can assume that channel
			 *     is still working correctly
			 *     and the problem is in completion writeback.
			 *     update completion writeback
			 *     with actual CHANSTS value
			 * else
			 *     try resetting the channel
			 */

			completion_hw.low = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(chan->device->version));
			completion_hw.high = readl(chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw = completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != chan->watchdog_completion)
			    && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) {
				chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				chan->completion_virt->low = completion_hw.low;
				chan->completion_virt->high = completion_hw.high;
			} else {
				ioat1_reset_channel(ioat);
				chan->watchdog_completion = 0;
				chan->last_compl_desc_addr_hw = 0;
			}
		} else {
			chan->last_compl_desc_addr_hw = 0;
			chan->watchdog_completion = chan->last_completion;
		}

		chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(tx->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	ioat->pending += desc->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
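
/*
 * Illustrative note (not in the upstream file): after submit the descriptor
 * chain looks roughly like
 *
 *   used_desc: ... -> [old tail] --hw->next--> [first new] -> ... -> [last new]
 *
 * The wmb() above orders the descriptor writes against the hw->next update
 * that exposes them to the engine; the engine itself is only poked when
 * ->pending crosses ioat_pending_level or issue_pending() is called.
 */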
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		   IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		   IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion_virt = pci_pool_alloc(chan->device->completion_pool,
					       GFP_KERNEL,
					       &chan->completion_addr);
	memset(chan->completion_virt, 0,
	       sizeof(*chan->completion_virt));
	writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_addr) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion_virt,
		      chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = chan->completion_addr = 0;
	chan->watchdog_completion = 0;
	chan->last_compl_desc_addr_hw = 0;
	chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;
		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);
		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;
		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->tx_cnt = tx_cnt;
	desc->len = total_len;
	list_splice(&chain, &desc->txd.tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	return &desc->txd;
}
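
/*
 * Illustrative note (not in the upstream file): a request larger than the
 * channel's xfercap is split across several hardware descriptors.  With
 * xfercap_scale = 20 (xfercap = 1 MB), a 2.5 MB copy becomes three
 * descriptors of 1 MB, 1 MB and 0.5 MB; only the last one carries the
 * caller's cookie and sets ctl_f.compl_write, so completion is reported once
 * for the whole transfer.
 */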
static void ioat1_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat1_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
		       int direction, enum dma_ctrl_flags flags, bool dst)
{
	if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
	    (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
		pci_unmap_single(pdev, addr, len, direction);
	else
		pci_unmap_page(pdev, addr, len, direction);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
		chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if ((chan->completion_virt->full
	    & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			readl(chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
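
/*
 * Illustrative note (not in the upstream file): the completion area is a
 * single u64 the engine keeps updated with the bus address of the last
 * descriptor it finished; its low-order bits double as the DMA transfer
 * status field checked above, which is why the address is recovered by
 * masking with IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (or, on 32-bit,
 * IOAT_LOW_COMPLETION_MASK).
 */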
/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	struct dma_async_tx_descriptor *tx;

	prefetch(chan->completion_virt);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	phys_complete = ioat_get_current_completion(chan);
	if (phys_complete == chan->last_completion) {
		spin_unlock_bh(&chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (time_after(jiffies,
			       chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
			ioat1_chan_watchdog(&(chan->device->work.work));
			chan->last_completion_time = jiffies;
		}
		return;
	}
	chan->last_completion_time = jiffies;

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			cookie = tx->cookie;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
			else
				tx->cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it, but don't look at it next
			 * time, either
			 */
			tx->cookie = 0;
			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat->desc_lock);

	chan->last_completion = phys_complete;
	if (cookie != 0)
		chan->completed_cookie = cookie;

	spin_unlock_bh(&chan->cleanup_lock);
}
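
/*
 * Illustrative note (not in the upstream file): cleanup walks used_desc in
 * submission order, retiring everything up to the address reported in the
 * completion writeback.  The descriptor whose bus address equals
 * phys_complete is deliberately left on the list so new work can still be
 * chained onto it; its cookie is zeroed so it is not processed twice.
 */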
static enum dma_status
ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		      dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat1_cleanup(ioat);
	return ioat_is_complete(c, cookie, done, used);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);
	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->txd.phys) >> 32,
	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, chan->reg_base
		+ IOAT_CHANCMD_OFFSET(chan->device->version));
	spin_unlock_bh(&ioat->desc_lock);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;
	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}
	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}
int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat1_dma_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}