/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

static int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");

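/* __ioat2_issue_pending - notify hardware of newly submitted descriptors
 *
 * Folds the ring's pending count into ->dmacount and writes it to the
 * channel's DMACOUNT register; callers hold ->ring_lock.
 */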
static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

static void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * set pending to '1' unless pending is already set to '2', pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * called with ring_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}

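/* __ioat2_start_null_desc - kick the channel with a no-op descriptor
 *
 * Allocates one ring entry, programs it as a NULL descriptor, points the
 * channel's chain address at it and issues it; callers hold ->ring_lock.
 */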
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

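/**
 * __cleanup - reclaim finished descriptors
 * @ioat: channel (ring) to clean
 * @phys_complete: physical address of the last descriptor the channel completed
 *
 * Walks the ring from the tail, unmapping buffers and running callbacks,
 * until the last completed descriptor is reached.  Called with ring_lock
 * and cleanup_lock held.
 */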
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

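/* ioat2_cleanup_tasklet - run descriptor cleanup from tasklet context and
 * rewrite IOAT_CHANCTRL_RUN to keep the channel serviced
 */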
static void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

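/* __restart_chan - re-seed a channel after it has been quiesced
 *
 * Rewinds ->issued to the tail and either re-issues the still-pending
 * descriptors or, if the ring is empty, starts a NULL descriptor.
 */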
static void __restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

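/* ioat2_restart_channel - quiesce the channel, reclaim what has completed,
 * and restart transmission from the current tail
 */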
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u64 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__restart_chan(ioat);
}

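/* ioat2_timer_event - completion watchdog
 *
 * If completions are still outstanding when the timer fires, check for a
 * halted channel, try to advance the tail, and escalate to a channel
 * restart if no progress was made since the last acknowledged event.
 */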
static void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat2_timer_event,
				  ioat2_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}

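/* ioat2_tx_submit_unlock - assign a cookie and drop the ring_lock taken by
 * ioat2_alloc_and_lock() at prep time, preserving in-order submission
 */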
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
	return cookie;
}

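/* ioat2_alloc_ring_ent - allocate one software ring entry and its hardware
 * descriptor from the device's descriptor pci_pool
 */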
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}
	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kfree(desc);
}

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
static int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u32 chanerr;
	int descs;
	int i;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat->alloc_order = ioat_get_alloc_order();
	descs = 1 << ioat->alloc_order;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return -ENOMEM;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return descs;
}

/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	if (unlikely(ioat2_ring_space(ioat) < num_descs)) {
		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* progress reclaim in the allocation failure case we
		 * may be called under bh_disabled so we need to trigger
		 * the timer event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (jiffies > chan->timer.expires &&
		    timer_pending(&chan->timer)) {
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			ioat2_timer_event((unsigned long) ioat);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0;  /* with ioat->ring_lock held */
}

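/* ioat2_dma_prep_memcpy_lock - prep a memcpy operation
 *
 * Splits the copy into (1 << xfercap_log)-sized hardware descriptors; only
 * the last descriptor carries the interrupt/completion-write flags.
 * Returns with ring_lock held until submit time.
 */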
static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat2_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(ioatdma_device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
}

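/* ioat2_is_complete - poll for transaction completion, cleaning the ring
 * before re-checking if the cookie is not yet complete
 */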
static enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat2_cleanup(ioat);
	return ioat_is_complete(c, cookie, done, used);
}

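/* ioat2_dma_probe - wire up the dmaengine operations for an I/OAT v2 device
 * and register it with the core
 */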
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}

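/* ioat3_dma_probe - I/OAT v2 setup plus the version 3 errata workarounds
 * (CHANERRMSK_INT masking and the TBG0 config-space parity clear)
 */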
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}