/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/pci.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
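
/*
 * Editorial example (values for illustration only): a 6-source xor is
 * programmed with ctl_f.src_cnt = src_cnt_to_hw(6) == 4, and the unmap and
 * debug paths recover the software view with src_cnt_to_sw(4) == 6.
 */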

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc __read_mostly = 0xe0;
static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc __read_mostly = 0xf8;
static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
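
/*
 * Worked lookup example using the tables above: xor source index 6 has bit 6
 * set in xor_idx_to_desc (0xe0), so its address lives in the extended
 * descriptor at raw field xor_idx_to_field[6] == 1, while source index 2 has
 * its bit clear and lives in the base descriptor at field
 * xor_idx_to_field[2] == 5.
 */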

static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
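
/*
 * Sizing note (editorial): per the idx_to_field tables above, the base xor
 * descriptor holds five source addresses and the base pq descriptor three,
 * so larger source counts spill into the adjacent 64-byte extended
 * descriptor and the entry occupies two ring slots.
 */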

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			ioat3_dma_unmap(ioat, desc, ioat->tail + i);
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat3_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order - 1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat3_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	int num_descs;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
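
/*
 * Editorial note: the prep routines in this file return with ioat->ring_lock
 * held; the lock is expected to be dropped by the ->tx_submit() path in
 * dma_v2.c, which is what turns "leave the channel locked" into in-order
 * submission.
 */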

static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	int num_descs;
	int with_ext;
	int i;
	u16 idx;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
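	/* Ring accounting example (illustrative numbers): an 8-source xor
	 * whose length spans three hardware descriptors is built as three
	 * base+ext pairs (num_descs doubled because src_cnt > 5) plus the
	 * trailing null descriptor, i.e. seven ring slots in total.
	 */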
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs + 1) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i += 1 + with_ext) {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	}

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	int num_descs;
	int with_ext;
	int i;
	u16 idx;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources
	 */
	if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs + 1) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i += 1 + with_ext) {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	}

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}
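
/*
 * Editorial note: on parts that advertise PQ but not native XOR, the two
 * helpers below back xor/xor_val with a pq operation: Q is disabled, a dummy
 * all-zero coefficient array is passed, and the P destination receives the
 * plain xor of the sources.
 */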

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[0] = dst;
	pq[1] = dst; /* specify a valid address for the disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[0] = src[0];
	pq[1] = src[0]; /* specify a valid address for the disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}

static void __devinit ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;
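
	/* Worked example: with IOAT_NUM_SRC_TEST == 6 and source page n filled
	 * with (1 << n), cmp_byte is 0x01 ^ 0x02 ^ ... ^ 0x20 == 0x3f, so every
	 * 32-bit word of a correct xor result reads 0x3f3f3f3f.
	 */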

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	dma_addr = dma_map_page(dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}

int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->cleanup_tasklet = ioat3_cleanup_tasklet;
	device->timer_fn = ioat3_timer_event;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat3_is_complete;
	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
	if (cap & IOAT_CAP_FILL_BLOCK) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}
	if (cap & IOAT_CAP_XOR) {
		dma->max_xor = 8;
		dma->xor_align = 2;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}
	if (cap & IOAT_CAP_PQ) {
		dma_set_maxpq(dma, 8, 0);
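		/* Editorial note: the hardware accepts up to 8 pq sources;
		 * dma_maxpq() reports fewer to callers that set the
		 * DMA_PREP_CONTINUE flags because this driver re-injects the
		 * previous P/Q results as implied sources (see
		 * __ioat3_prep_pq_lock() above).
		 */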
		dma->pq_align = 2;

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

		if (!(cap & IOAT_CAP_XOR)) {
			dma->max_xor = 8;
			dma->xor_align = 2;

			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}