]>
Commit | Line | Data |
---|---|---|
5cbafa65 DW |
1 | /* |
2 | * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License as published by the Free | |
6 | * Software Foundation; either version 2 of the License, or (at your option) | |
7 | * any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License along with | |
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | |
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
17 | * | |
18 | * The full GNU General Public License is included in this distribution in the | |
19 | * file called COPYING. | |
20 | */ | |
21 | #ifndef IOATDMA_V2_H | |
22 | #define IOATDMA_V2_H | |
23 | ||
24 | #include <linux/dmaengine.h> | |
25 | #include "dma.h" | |
26 | #include "hw.h" | |
27 | ||
28 | ||
29 | extern int ioat_pending_level; | |
30 | ||
31 | /* | |
32 | * workaround for IOAT ver.3.0 null descriptor issue | |
33 | * (channel returns error when size is 0) | |
34 | */ | |
35 | #define NULL_DESC_BUFFER_SIZE 1 | |
36 | ||
37 | #define IOAT_MAX_ORDER 16 | |
38 | #define ioat_get_alloc_order() \ | |
39 | (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) | |
40 | ||
/**
 * struct ioat2_dma_chan - ioat v2 / v3 channel attributes
 * @base: common ioat channel parameters
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @pending: lock free indicator for issued != head
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @ring: software ring buffer implementation of hardware ring
 * @ring_lock: protects ring attributes
 */
struct ioat2_dma_chan {
	struct ioat_chan_common base;
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	int pending;
	struct ioat_ring_ent **ring;
	spinlock_t ring_lock;
};
65 | ||
66 | static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) | |
67 | { | |
68 | struct ioat_chan_common *chan = to_chan_common(c); | |
69 | ||
70 | return container_of(chan, struct ioat2_dma_chan, base); | |
71 | } | |
72 | ||
73 | static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) | |
74 | { | |
75 | return (1 << ioat->alloc_order) - 1; | |
76 | } | |
77 | ||
78 | /* count of descriptors in flight with the engine */ | |
79 | static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) | |
80 | { | |
81 | return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); | |
82 | } | |
83 | ||
84 | /* count of descriptors pending submission to hardware */ | |
85 | static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) | |
86 | { | |
87 | return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); | |
88 | } | |
89 | ||
90 | static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) | |
91 | { | |
92 | u16 num_descs = ioat2_ring_mask(ioat) + 1; | |
93 | u16 active = ioat2_ring_active(ioat); | |
94 | ||
95 | BUG_ON(active > num_descs); | |
96 | ||
97 | return num_descs - active; | |
98 | } | |
99 | ||
100 | /* assumes caller already checked space */ | |
101 | static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) | |
102 | { | |
103 | ioat->head += len; | |
104 | return ioat->head - len; | |
105 | } | |
106 | ||
107 | static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) | |
108 | { | |
109 | u16 num_descs = len >> ioat->xfercap_log; | |
110 | ||
111 | num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); | |
112 | return num_descs; | |
113 | } | |
114 | ||
/**
 * struct ioat_ring_ent - software wrapper around one ring descriptor
 * @hw: backing hardware descriptor
 * @txd: dmaengine async_tx descriptor for this entry
 * @len: transfer length (presumably the bytes described by @hw — confirm
 *	 against the .c users of this struct)
 * @id: debug-only identifier
 */
struct ioat_ring_ent {
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor txd;
	size_t len;
#ifdef DEBUG
	int id;
#endif
};
123 | ||
124 | static inline struct ioat_ring_ent * | |
125 | ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) | |
126 | { | |
127 | return ioat->ring[idx & ioat2_ring_mask(ioat)]; | |
128 | } | |
129 | ||
/*
 * Program the 64-bit descriptor chain address into the channel's pair of
 * 32-bit CHAINADDR registers (low word written first, then high word).
 */
static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
{
	struct ioat_chan_common *chan = &ioat->base;

	writel(addr & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
139 | ||
345d8523 DW |
140 | int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); |
141 | int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); | |
142 | struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | |
143 | struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | |
5cbafa65 | 144 | #endif /* IOATDMA_V2_H */ |