/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.26"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
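	/*
	 * Worked example of the sizes above (MV_MAX_Q_DEPTH == 32):
	 * CRQB ring = 32 B x 32 tags = 1024 B (hence the 1KB alignment),
	 * CRPB ring =  8 B x 32 tags =  256 B,
	 * one ePRD (SG) table = 16 B x 256 entries = 4096 B per tag.
	 */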

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | MV_FLAG_IRQ_COALESCE |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II  IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD_OFS	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

509 | static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); |
510 | static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); | |
511 | static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); | |
512 | static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); | |
31961943 BR |
513 | static int mv_port_start(struct ata_port *ap); |
514 | static void mv_port_stop(struct ata_port *ap); | |
3e4a1391 | 515 | static int mv_qc_defer(struct ata_queued_cmd *qc); |
31961943 | 516 | static void mv_qc_prep(struct ata_queued_cmd *qc); |
e4e7b892 | 517 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc); |
9a3d9eb0 | 518 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
a1efdaba TH |
519 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
520 | unsigned long deadline); | |
bdd4ddde JG |
521 | static void mv_eh_freeze(struct ata_port *ap); |
522 | static void mv_eh_thaw(struct ata_port *ap); | |
f273827e | 523 | static void mv6_dev_config(struct ata_device *dev); |
20f733e7 | 524 | |
2a47ce06 JG |
525 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
526 | unsigned int port); | |
47c2b677 JG |
527 | static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); |
528 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, | |
529 | void __iomem *mmio); | |
c9d39130 JG |
530 | static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, |
531 | unsigned int n_hc); | |
522479fb | 532 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); |
7bb3c529 | 533 | static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); |
47c2b677 | 534 | |
2a47ce06 JG |
535 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
536 | unsigned int port); | |
47c2b677 JG |
537 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); |
538 | static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, | |
539 | void __iomem *mmio); | |
c9d39130 JG |
540 | static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, |
541 | unsigned int n_hc); | |
522479fb | 542 | static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); |
f351b2d6 SB |
543 | static void mv_soc_enable_leds(struct mv_host_priv *hpriv, |
544 | void __iomem *mmio); | |
545 | static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, | |
546 | void __iomem *mmio); | |
547 | static int mv_soc_reset_hc(struct mv_host_priv *hpriv, | |
548 | void __iomem *mmio, unsigned int n_hc); | |
549 | static void mv_soc_reset_flash(struct mv_host_priv *hpriv, | |
550 | void __iomem *mmio); | |
551 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); | |
7bb3c529 | 552 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); |
e12bef50 | 553 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
c9d39130 | 554 | unsigned int port_no); |
e12bef50 | 555 | static int mv_stop_edma(struct ata_port *ap); |
b562468c | 556 | static int mv_stop_edma_engine(void __iomem *port_mmio); |
00b81235 | 557 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); |
47c2b677 | 558 | |
e49856d8 ML |
559 | static void mv_pmp_select(struct ata_port *ap, int pmp); |
560 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, | |
561 | unsigned long deadline); | |
562 | static int mv_softreset(struct ata_link *link, unsigned int *class, | |
563 | unsigned long deadline); | |
29d187bb | 564 | static void mv_pmp_error_handler(struct ata_port *ap); |
4c299ca3 ML |
565 | static void mv_process_crpb_entries(struct ata_port *ap, |
566 | struct mv_port_priv *pp); | |
47c2b677 | 567 | |
da14265e ML |
568 | static void mv_sff_irq_clear(struct ata_port *ap); |
569 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc); | |
570 | static void mv_bmdma_setup(struct ata_queued_cmd *qc); | |
571 | static void mv_bmdma_start(struct ata_queued_cmd *qc); | |
572 | static void mv_bmdma_stop(struct ata_queued_cmd *qc); | |
573 | static u8 mv_bmdma_status(struct ata_port *ap); | |
d16ab3f6 | 574 | static u8 mv_sff_check_status(struct ata_port *ap); |
da14265e | 575 | |
eb73d558 ML |
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
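
/*
 * Example of the macro above (just the arithmetic, not from the datasheet):
 * port 5 is hardport 1 on HC 1, so shift = 1 * HC_SHIFT + 1 * 2 = 11,
 * which places that port's ERR_IRQ and DONE_IRQ at bits 11 and 12 of the
 * main interrupt cause/mask registers.
 */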

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
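
/*
 * Example of the register-window math above: port 5 (hardport 1 on HC 1)
 * maps to base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000 = base + 0x34000.
 */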

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *      Initialize the local cache of port registers,
 *      so that reading them over and over again can
 *      be avoided on the hotter paths of this driver.
 *      This saves a few microseconds each time we switch
 *      to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *      Write a new value to a cached register,
 *      but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;
		writel(new, addr);
	}
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
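
/*
 * For example, with req_idx == 3 the request IN pointer is written as
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | (3 << EDMA_REQ_Q_PTR_SHIFT):
 * the ring base address and the ring index share one register, which is
 * why the CRQB ring must be 1KB aligned (see the WARN_ON above).
 */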

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}

/**
 *      mv_start_edma - Enable eDMA engine
 *      @ap: ATA channel being configured
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being started
 *
 *      Enable EDMA for this port, first stopping and reconfiguring the
 *      engine if it is currently running in the wrong (NCQ vs. non-NCQ) mode.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
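
/* The polling loop above gives the chip up to 10000 * 10us = 100ms to stop. */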

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
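
/*
 * For example, SCR_STATUS (0) yields 0x300 and SCR_ACTIVE yields 0x350,
 * matching the "SATA registers" offsets in the enum near the top of this file.
 */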

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

/**
 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *	@enable_bmdma: enable (1) or disable (0) basic DMA
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
1269 | static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) | |
1270 | { | |
1271 | struct mv_port_priv *pp = ap->private_data; | |
1272 | u32 new, *old = &pp->cached.unknown_rsvd; | |
1273 | ||
1274 | if (enable_bmdma) | |
1275 | new = *old | 1; | |
1276 | else | |
1277 | new = *old & ~1; | |
1278 | mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new); | |
1279 | } | |
1280 | ||
00b81235 | 1281 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) |
e4e7b892 | 1282 | { |
0c58912e | 1283 | u32 cfg; |
e12bef50 ML |
1284 | struct mv_port_priv *pp = ap->private_data; |
1285 | struct mv_host_priv *hpriv = ap->host->private_data; | |
1286 | void __iomem *port_mmio = mv_ap_base(ap); | |
e4e7b892 JG |
1287 | |
1288 | /* set up non-NCQ EDMA configuration */ | |
0c58912e | 1289 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
d16ab3f6 ML |
1290 | pp->pp_flags &= |
1291 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); | |
e4e7b892 | 1292 | |
0c58912e | 1293 | if (IS_GEN_I(hpriv)) |
e4e7b892 JG |
1294 | cfg |= (1 << 8); /* enab config burst size mask */ |
1295 | ||
dd2890f6 | 1296 | else if (IS_GEN_II(hpriv)) { |
e4e7b892 | 1297 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; |
dd2890f6 | 1298 | mv_60x1_errata_sata25(ap, want_ncq); |
e4e7b892 | 1299 | |
dd2890f6 | 1300 | } else if (IS_GEN_IIE(hpriv)) { |
00f42eab ML |
1301 | int want_fbs = sata_pmp_attached(ap); |
1302 | /* | |
1303 | * Possible future enhancement: | |
1304 | * | |
1305 | * The chip can use FBS with non-NCQ, if we allow it, | |
1306 | * But first we need to have the error handling in place | |
1307 | * for this mode (datasheet section 7.3.15.4.2.3). | |
1308 | * So disallow non-NCQ FBS for now. | |
1309 | */ | |
1310 | want_fbs &= want_ncq; | |
1311 | ||
08da1759 | 1312 | mv_config_fbs(ap, want_ncq, want_fbs); |
00f42eab ML |
1313 | |
1314 | if (want_fbs) { | |
1315 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; | |
1316 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ | |
1317 | } | |
1318 | ||
e728eabe | 1319 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ |
00b81235 ML |
1320 | if (want_edma) { |
1321 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | |
1322 | if (!IS_SOC(hpriv)) | |
1323 | cfg |= (1 << 18); /* enab early completion */ | |
1324 | } | |
616d4a98 ML |
1325 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) |
1326 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ | |
c01e8a23 | 1327 | mv_bmdma_enable_iie(ap, !want_edma); |
e4e7b892 JG |
1328 | } |
1329 | ||
72109168 ML |
1330 | if (want_ncq) { |
1331 | cfg |= EDMA_CFG_NCQ; | |
1332 | pp->pp_flags |= MV_PP_FLAG_NCQ_EN; | |
00b81235 | 1333 | } |
72109168 | 1334 | |
e4e7b892 JG |
1335 | writelfl(cfg, port_mmio + EDMA_CFG_OFS); |
1336 | } | |
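/*
 * Worked example (illustrative, derived from the code above, not from the
 * datasheet): on a non-SOC GEN_IIE port with a port multiplier attached,
 * calling mv_edma_cfg(ap, 1, 1) leaves cfg = EDMA_CFG_Q_DEPTH |
 * EDMA_CFG_EDMA_FBS | (1 << 23) | (1 << 22) | (1 << 18) | EDMA_CFG_NCQ
 * (plus (1 << 17) when MV_HP_CUT_THROUGH is set), and records
 * MV_PP_FLAG_FBS_EN and MV_PP_FLAG_NCQ_EN in pp->pp_flags.
 */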
1337 | ||
da2fa9ba ML |
1338 | static void mv_port_free_dma_mem(struct ata_port *ap) |
1339 | { | |
1340 | struct mv_host_priv *hpriv = ap->host->private_data; | |
1341 | struct mv_port_priv *pp = ap->private_data; | |
eb73d558 | 1342 | int tag; |
da2fa9ba ML |
1343 | |
1344 | if (pp->crqb) { | |
1345 | dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); | |
1346 | pp->crqb = NULL; | |
1347 | } | |
1348 | if (pp->crpb) { | |
1349 | dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); | |
1350 | pp->crpb = NULL; | |
1351 | } | |
eb73d558 ML |
1352 | /* |
1353 | * For GEN_I, there's no NCQ, so we have only a single sg_tbl. | |
1354 | * For later hardware, we have one unique sg_tbl per NCQ tag. | |
1355 | */ | |
1356 | for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { | |
1357 | if (pp->sg_tbl[tag]) { | |
1358 | if (tag == 0 || !IS_GEN_I(hpriv)) | |
1359 | dma_pool_free(hpriv->sg_tbl_pool, | |
1360 | pp->sg_tbl[tag], | |
1361 | pp->sg_tbl_dma[tag]); | |
1362 | pp->sg_tbl[tag] = NULL; | |
1363 | } | |
da2fa9ba ML |
1364 | } |
1365 | } | |
1366 | ||
05b308e1 BR |
1367 | /** |
1368 | * mv_port_start - Port specific init/start routine. | |
1369 | * @ap: ATA channel to manipulate | |
1370 | * | |
1371 | * Allocate and point to DMA memory, init port private memory, | |
1372 | * zero indices. | |
1373 | * | |
1374 | * LOCKING: | |
1375 | * Inherited from caller. | |
1376 | */ | |
31961943 BR |
1377 | static int mv_port_start(struct ata_port *ap) |
1378 | { | |
cca3974e JG |
1379 | struct device *dev = ap->host->dev; |
1380 | struct mv_host_priv *hpriv = ap->host->private_data; | |
31961943 | 1381 | struct mv_port_priv *pp; |
dde20207 | 1382 | int tag; |
31961943 | 1383 | |
24dc5f33 | 1384 | pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); |
6037d6bb | 1385 | if (!pp) |
24dc5f33 | 1386 | return -ENOMEM; |
da2fa9ba | 1387 | ap->private_data = pp; |
31961943 | 1388 | |
da2fa9ba ML |
1389 | pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); |
1390 | if (!pp->crqb) | |
1391 | return -ENOMEM; | |
1392 | memset(pp->crqb, 0, MV_CRQB_Q_SZ); | |
31961943 | 1393 | |
da2fa9ba ML |
1394 | pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); |
1395 | if (!pp->crpb) | |
1396 | goto out_port_free_dma_mem; | |
1397 | memset(pp->crpb, 0, MV_CRPB_Q_SZ); | |
31961943 | 1398 | |
3bd0a70e ML |
1399 | /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ |
1400 | if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) | |
1401 | ap->flags |= ATA_FLAG_AN; | |
eb73d558 ML |
1402 | /* |
1403 | * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. | |
1404 | * For later hardware, we need one unique sg_tbl per NCQ tag. | |
1405 | */ | |
1406 | for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { | |
1407 | if (tag == 0 || !IS_GEN_I(hpriv)) { | |
1408 | pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, | |
1409 | GFP_KERNEL, &pp->sg_tbl_dma[tag]); | |
1410 | if (!pp->sg_tbl[tag]) | |
1411 | goto out_port_free_dma_mem; | |
1412 | } else { | |
1413 | pp->sg_tbl[tag] = pp->sg_tbl[0]; | |
1414 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; | |
1415 | } | |
1416 | } | |
08da1759 | 1417 | mv_save_cached_regs(ap); |
66e57a2c | 1418 | mv_edma_cfg(ap, 0, 0); |
31961943 | 1419 | return 0; |
da2fa9ba ML |
1420 | |
1421 | out_port_free_dma_mem: | |
1422 | mv_port_free_dma_mem(ap); | |
1423 | return -ENOMEM; | |
31961943 BR |
1424 | } |
1425 | ||
05b308e1 BR |
1426 | /** |
1427 | * mv_port_stop - Port specific cleanup/stop routine. | |
1428 | * @ap: ATA channel to manipulate | |
1429 | * | |
1430 | * Stop DMA, cleanup port memory. | |
1431 | * | |
1432 | * LOCKING: | |
cca3974e | 1433 | * This routine uses the host lock to protect the DMA stop. |
05b308e1 | 1434 | */ |
31961943 BR |
1435 | static void mv_port_stop(struct ata_port *ap) |
1436 | { | |
e12bef50 | 1437 | mv_stop_edma(ap); |
88e675e1 | 1438 | mv_enable_port_irqs(ap, 0); |
da2fa9ba | 1439 | mv_port_free_dma_mem(ap); |
31961943 BR |
1440 | } |
1441 | ||
05b308e1 BR |
1442 | /** |
1443 | * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries | |
1444 | * @qc: queued command whose SG list to source from | |
1445 | * | |
1446 | * Populate the SG list and mark the last entry. | |
1447 | * | |
1448 | * LOCKING: | |
1449 | * Inherited from caller. | |
1450 | */ | |
6c08772e | 1451 | static void mv_fill_sg(struct ata_queued_cmd *qc) |
31961943 BR |
1452 | { |
1453 | struct mv_port_priv *pp = qc->ap->private_data; | |
972c26bd | 1454 | struct scatterlist *sg; |
3be6cbd7 | 1455 | struct mv_sg *mv_sg, *last_sg = NULL; |
ff2aeb1e | 1456 | unsigned int si; |
31961943 | 1457 | |
eb73d558 | 1458 | mv_sg = pp->sg_tbl[qc->tag]; |
ff2aeb1e | 1459 | for_each_sg(qc->sg, sg, qc->n_elem, si) { |
d88184fb JG |
1460 | dma_addr_t addr = sg_dma_address(sg); |
1461 | u32 sg_len = sg_dma_len(sg); | |
22374677 | 1462 | |
4007b493 OJ |
1463 | while (sg_len) { |
1464 | u32 offset = addr & 0xffff; | |
1465 | u32 len = sg_len; | |
22374677 | 1466 | |
32cd11a6 | 1467 | if (offset + len > 0x10000) |
4007b493 OJ |
1468 | len = 0x10000 - offset; |
1469 | ||
1470 | mv_sg->addr = cpu_to_le32(addr & 0xffffffff); | |
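/* two 16-bit shifts give the high dword while remaining well-defined
 * even when dma_addr_t is only 32 bits wide (a single ">> 32" would
 * not be) */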
1471 | mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); | |
6c08772e | 1472 | mv_sg->flags_size = cpu_to_le32(len & 0xffff); |
32cd11a6 | 1473 | mv_sg->reserved = 0; |
4007b493 OJ |
1474 | |
1475 | sg_len -= len; | |
1476 | addr += len; | |
1477 | ||
3be6cbd7 | 1478 | last_sg = mv_sg; |
4007b493 | 1479 | mv_sg++; |
4007b493 | 1480 | } |
31961943 | 1481 | } |
3be6cbd7 JG |
1482 | |
1483 | if (likely(last_sg)) | |
1484 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | |
32cd11a6 | 1485 | mb(); /* ensure data structure is visible to the chipset */ |
31961943 BR |
1486 | } |
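A minimal standalone sketch of the 64KB-boundary chunking performed above
(illustrative userspace C, not driver code; the function name and sample
values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the mv_fill_sg() inner loop: emit one "ePRD" per chunk,
 * never letting a chunk cross a 64KB boundary. */
static void split_into_eprd_chunks(uint64_t addr, uint32_t sg_len)
{
	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		if (offset + len > 0x10000)	/* would cross a 64KB boundary */
			len = 0x10000 - offset;

		printf("ePRD: addr=0x%llx len=0x%x\n",
		       (unsigned long long)addr, len);
		sg_len -= len;
		addr += len;
	}
}

int main(void)
{
	/* A 12KB segment starting 4KB below a 64KB boundary splits
	 * into a 4KB entry followed by an 8KB entry. */
	split_into_eprd_chunks(0x1f000, 0x3000);
	return 0;
}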
1487 | ||
5796d1c4 | 1488 | static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
31961943 | 1489 | { |
559eedad | 1490 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
31961943 | 1491 | (last ? CRQB_CMD_LAST : 0); |
559eedad | 1492 | *cmdw = cpu_to_le16(tmp); |
31961943 BR |
1493 | } |
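/*
 * Each packed command word carries one shadow-register write: the data
 * byte in the low bits, the register address shifted up by
 * CRQB_CMD_ADDR_SHIFT, the CRQB_CMD_CS flag, and CRQB_CMD_LAST set only
 * on the final word of the sequence (see how mv_qc_prep() below emits
 * the taskfile as a series of these words).
 */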
1494 | ||
da14265e ML |
1495 | /** |
1496 | * mv_sff_irq_clear - Clear hardware interrupt after DMA. | |
1497 | * @ap: Port associated with this ATA transaction. | |
1498 | * | |
1499 | * We need this only for ATAPI bmdma transactions, | |
1500 | * as otherwise we experience spurious interrupts | |
1501 | * after libata-sff handles the bmdma interrupts. | |
1502 | */ | |
1503 | static void mv_sff_irq_clear(struct ata_port *ap) | |
1504 | { | |
1505 | mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); | |
1506 | } | |
1507 | ||
1508 | /** | |
1509 | * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. | |
1510 | * @qc: queued command to check for chipset/DMA compatibility. | |
1511 | * | |
1512 | * The bmdma engines cannot handle speculative data sizes | |
1513 | * (bytecount under/overflow). So only allow DMA for |
1514 | * data transfer commands with known data sizes. | |
1515 | * | |
1516 | * LOCKING: | |
1517 | * Inherited from caller. | |
1518 | */ | |
1519 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc) | |
1520 | { | |
1521 | struct scsi_cmnd *scmd = qc->scsicmd; | |
1522 | ||
1523 | if (scmd) { | |
1524 | switch (scmd->cmnd[0]) { | |
1525 | case READ_6: | |
1526 | case READ_10: | |
1527 | case READ_12: | |
1528 | case WRITE_6: | |
1529 | case WRITE_10: | |
1530 | case WRITE_12: | |
1531 | case GPCMD_READ_CD: | |
1532 | case GPCMD_SEND_DVD_STRUCTURE: | |
1533 | case GPCMD_SEND_CUE_SHEET: | |
1534 | return 0; /* DMA is safe */ | |
1535 | } | |
1536 | } | |
1537 | return -EOPNOTSUPP; /* use PIO instead */ | |
1538 | } | |
1539 | ||
1540 | /** | |
1541 | * mv_bmdma_setup - Set up BMDMA transaction | |
1542 | * @qc: queued command to prepare DMA for. | |
1543 | * | |
1544 | * LOCKING: | |
1545 | * Inherited from caller. | |
1546 | */ | |
1547 | static void mv_bmdma_setup(struct ata_queued_cmd *qc) | |
1548 | { | |
1549 | struct ata_port *ap = qc->ap; | |
1550 | void __iomem *port_mmio = mv_ap_base(ap); | |
1551 | struct mv_port_priv *pp = ap->private_data; | |
1552 | ||
1553 | mv_fill_sg(qc); | |
1554 | ||
1555 | /* clear all DMA cmd bits */ | |
1556 | writel(0, port_mmio + BMDMA_CMD_OFS); | |
1557 | ||
1558 | /* load PRD table addr. */ | |
1559 | writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, | |
1560 | port_mmio + BMDMA_PRD_HIGH_OFS); | |
1561 | writelfl(pp->sg_tbl_dma[qc->tag], | |
1562 | port_mmio + BMDMA_PRD_LOW_OFS); | |
1563 | ||
1564 | /* issue r/w command */ | |
1565 | ap->ops->sff_exec_command(ap, &qc->tf); | |
1566 | } | |
1567 | ||
1568 | /** | |
1569 | * mv_bmdma_start - Start a BMDMA transaction | |
1570 | * @qc: queued command to start DMA on. | |
1571 | * | |
1572 | * LOCKING: | |
1573 | * Inherited from caller. | |
1574 | */ | |
1575 | static void mv_bmdma_start(struct ata_queued_cmd *qc) | |
1576 | { | |
1577 | struct ata_port *ap = qc->ap; | |
1578 | void __iomem *port_mmio = mv_ap_base(ap); | |
1579 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | |
1580 | u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; | |
1581 | ||
1582 | /* start host DMA transaction */ | |
1583 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | |
1584 | } | |
1585 | ||
1586 | /** | |
1587 | * mv_bmdma_stop - Stop BMDMA transfer | |
1588 | * @qc: queued command to stop DMA on. | |
1589 | * | |
1590 | * Clears the ATA_DMA_START flag in the bmdma control register | |
1591 | * | |
1592 | * LOCKING: | |
1593 | * Inherited from caller. | |
1594 | */ | |
1595 | static void mv_bmdma_stop(struct ata_queued_cmd *qc) | |
1596 | { | |
1597 | struct ata_port *ap = qc->ap; | |
1598 | void __iomem *port_mmio = mv_ap_base(ap); | |
1599 | u32 cmd; | |
1600 | ||
1601 | /* clear start/stop bit */ | |
1602 | cmd = readl(port_mmio + BMDMA_CMD_OFS); | |
1603 | cmd &= ~ATA_DMA_START; | |
1604 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | |
1605 | ||
1606 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | |
1607 | ata_sff_dma_pause(ap); | |
1608 | } | |
1609 | ||
1610 | /** | |
1611 | * mv_bmdma_status - Read BMDMA status | |
1612 | * @ap: port for which to retrieve DMA status. | |
1613 | * | |
1614 | * Read and return equivalent of the sff BMDMA status register. | |
1615 | * | |
1616 | * LOCKING: | |
1617 | * Inherited from caller. | |
1618 | */ | |
1619 | static u8 mv_bmdma_status(struct ata_port *ap) | |
1620 | { | |
1621 | void __iomem *port_mmio = mv_ap_base(ap); | |
1622 | u32 reg, status; | |
1623 | ||
1624 | /* | |
1625 | * Other bits are valid only if ATA_DMA_ACTIVE==0, | |
1626 | * and the ATA_DMA_INTR bit doesn't exist. | |
1627 | */ | |
1628 | reg = readl(port_mmio + BMDMA_STATUS_OFS); | |
1629 | if (reg & ATA_DMA_ACTIVE) | |
1630 | status = ATA_DMA_ACTIVE; | |
1631 | else | |
1632 | status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; | |
1633 | return status; | |
1634 | } | |
1635 | ||
05b308e1 BR |
1636 | /** |
1637 | * mv_qc_prep - Host specific command preparation. | |
1638 | * @qc: queued command to prepare | |
1639 | * | |
1640 | * This routine simply redirects to the general purpose routine | |
1641 | * if command is not DMA. Else, it handles prep of the CRQB | |
1642 | * (command request block), does some sanity checking, and calls | |
1643 | * the SG load routine. | |
1644 | * | |
1645 | * LOCKING: | |
1646 | * Inherited from caller. | |
1647 | */ | |
31961943 BR |
1648 | static void mv_qc_prep(struct ata_queued_cmd *qc) |
1649 | { | |
1650 | struct ata_port *ap = qc->ap; | |
1651 | struct mv_port_priv *pp = ap->private_data; | |
e1469874 | 1652 | __le16 *cw; |
31961943 BR |
1653 | struct ata_taskfile *tf; |
1654 | u16 flags = 0; | |
a6432436 | 1655 | unsigned in_index; |
31961943 | 1656 | |
138bfdd0 ML |
1657 | if ((qc->tf.protocol != ATA_PROT_DMA) && |
1658 | (qc->tf.protocol != ATA_PROT_NCQ)) | |
31961943 | 1659 | return; |
20f733e7 | 1660 | |
31961943 BR |
1661 | /* Fill in command request block |
1662 | */ | |
e4e7b892 | 1663 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
31961943 | 1664 | flags |= CRQB_FLAG_READ; |
beec7dbc | 1665 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
31961943 | 1666 | flags |= qc->tag << CRQB_TAG_SHIFT; |
e49856d8 | 1667 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; |
31961943 | 1668 | |
bdd4ddde | 1669 | /* get current queue index from software */ |
fcfb1f77 | 1670 | in_index = pp->req_idx; |
a6432436 ML |
1671 | |
1672 | pp->crqb[in_index].sg_addr = | |
eb73d558 | 1673 | cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); |
a6432436 | 1674 | pp->crqb[in_index].sg_addr_hi = |
eb73d558 | 1675 | cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); |
a6432436 | 1676 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); |
31961943 | 1677 | |
a6432436 | 1678 | cw = &pp->crqb[in_index].ata_cmd[0]; |
31961943 BR |
1679 | tf = &qc->tf; |
1680 | ||
1681 | /* Sadly, the CRQB cannot accommodate all registers--there are |
1682 | * only 11 bytes...so we must pick and choose required | |
1683 | * registers based on the command. So, we drop feature and | |
1684 | * hob_feature for [RW] DMA commands, but they are needed for | |
cd12e1f7 ML |
1685 | * NCQ. NCQ will drop hob_nsect, which is not needed there |
1686 | * (nsect is used only for the tag; feat/hob_feat hold true nsect). | |
20f733e7 | 1687 | */ |
31961943 BR |
1688 | switch (tf->command) { |
1689 | case ATA_CMD_READ: | |
1690 | case ATA_CMD_READ_EXT: | |
1691 | case ATA_CMD_WRITE: | |
1692 | case ATA_CMD_WRITE_EXT: | |
c15d85c8 | 1693 | case ATA_CMD_WRITE_FUA_EXT: |
31961943 BR |
1694 | mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); |
1695 | break; | |
31961943 BR |
1696 | case ATA_CMD_FPDMA_READ: |
1697 | case ATA_CMD_FPDMA_WRITE: | |
8b260248 | 1698 | mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); |
31961943 BR |
1699 | mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); |
1700 | break; | |
31961943 BR |
1701 | default: |
1702 | /* The only other commands EDMA supports in non-queued and | |
1703 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none | |
1704 | * of which are defined/used by Linux. If we get here, this | |
1705 | * driver needs work. | |
1706 | * | |
1707 | * FIXME: modify libata to give qc_prep a return value and | |
1708 | * return error here. | |
1709 | */ | |
1710 | BUG_ON(tf->command); | |
1711 | break; | |
1712 | } | |
1713 | mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); | |
1714 | mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); | |
1715 | mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); | |
1716 | mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); | |
1717 | mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); | |
1718 | mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); | |
1719 | mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); | |
1720 | mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); | |
1721 | mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ | |
1722 | ||
e4e7b892 JG |
1723 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
1724 | return; | |
1725 | mv_fill_sg(qc); | |
1726 | } | |
1727 | ||
1728 | /** | |
1729 | * mv_qc_prep_iie - Host specific command preparation. | |
1730 | * @qc: queued command to prepare | |
1731 | * | |
1732 | * This routine simply redirects to the general purpose routine | |
1733 | * if command is not DMA. Else, it handles prep of the CRQB | |
1734 | * (command request block), does some sanity checking, and calls | |
1735 | * the SG load routine. | |
1736 | * | |
1737 | * LOCKING: | |
1738 | * Inherited from caller. | |
1739 | */ | |
1740 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |
1741 | { | |
1742 | struct ata_port *ap = qc->ap; | |
1743 | struct mv_port_priv *pp = ap->private_data; | |
1744 | struct mv_crqb_iie *crqb; | |
1745 | struct ata_taskfile *tf; | |
a6432436 | 1746 | unsigned in_index; |
e4e7b892 JG |
1747 | u32 flags = 0; |
1748 | ||
138bfdd0 ML |
1749 | if ((qc->tf.protocol != ATA_PROT_DMA) && |
1750 | (qc->tf.protocol != ATA_PROT_NCQ)) | |
e4e7b892 JG |
1751 | return; |
1752 | ||
e12bef50 | 1753 | /* Fill in Gen IIE command request block */ |
e4e7b892 JG |
1754 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
1755 | flags |= CRQB_FLAG_READ; | |
1756 | ||
beec7dbc | 1757 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
e4e7b892 | 1758 | flags |= qc->tag << CRQB_TAG_SHIFT; |
8c0aeb4a | 1759 | flags |= qc->tag << CRQB_HOSTQ_SHIFT; |
e49856d8 | 1760 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; |
e4e7b892 | 1761 | |
bdd4ddde | 1762 | /* get current queue index from software */ |
fcfb1f77 | 1763 | in_index = pp->req_idx; |
a6432436 ML |
1764 | |
1765 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; | |
eb73d558 ML |
1766 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); |
1767 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); | |
e4e7b892 JG |
1768 | crqb->flags = cpu_to_le32(flags); |
1769 | ||
1770 | tf = &qc->tf; | |
1771 | crqb->ata_cmd[0] = cpu_to_le32( | |
1772 | (tf->command << 16) | | |
1773 | (tf->feature << 24) | |
1774 | ); | |
1775 | crqb->ata_cmd[1] = cpu_to_le32( | |
1776 | (tf->lbal << 0) | | |
1777 | (tf->lbam << 8) | | |
1778 | (tf->lbah << 16) | | |
1779 | (tf->device << 24) | |
1780 | ); | |
1781 | crqb->ata_cmd[2] = cpu_to_le32( | |
1782 | (tf->hob_lbal << 0) | | |
1783 | (tf->hob_lbam << 8) | | |
1784 | (tf->hob_lbah << 16) | | |
1785 | (tf->hob_feature << 24) | |
1786 | ); | |
1787 | crqb->ata_cmd[3] = cpu_to_le32( | |
1788 | (tf->nsect << 0) | | |
1789 | (tf->hob_nsect << 8) | |
1790 | ); | |
1791 | ||
1792 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | |
31961943 | 1793 | return; |
31961943 BR |
1794 | mv_fill_sg(qc); |
1795 | } | |
1796 | ||
d16ab3f6 ML |
1797 | /** |
1798 | * mv_sff_check_status - fetch device status, if valid | |
1799 | * @ap: ATA port to fetch status from | |
1800 | * | |
1801 | * When using command issue via mv_qc_issue_fis(), | |
1802 | * the initial ATA_BUSY state does not show up in the | |
1803 | * ATA status (shadow) register. This can confuse libata! | |
1804 | * | |
1805 | * So we have a hook here to fake ATA_BUSY for that situation, | |
1806 | * until the first time a BUSY, DRQ, or ERR bit is seen. | |
1807 | * | |
1808 | * The rest of the time, it simply returns the ATA status register. | |
1809 | */ | |
1810 | static u8 mv_sff_check_status(struct ata_port *ap) | |
1811 | { | |
1812 | u8 stat = ioread8(ap->ioaddr.status_addr); | |
1813 | struct mv_port_priv *pp = ap->private_data; | |
1814 | ||
1815 | if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { | |
1816 | if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) | |
1817 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; | |
1818 | else | |
1819 | stat = ATA_BUSY; | |
1820 | } | |
1821 | return stat; | |
1822 | } | |
1823 | ||
70f8b79c ML |
1824 | /** |
1825 | * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register | |
1826 | * @fis: fis to be sent | |
1827 | * @nwords: number of 32-bit words in the fis | |
1828 | */ | |
1829 | static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) | |
1830 | { | |
1831 | void __iomem *port_mmio = mv_ap_base(ap); | |
1832 | u32 ifctl, old_ifctl, ifstat; | |
1833 | int i, timeout = 200, final_word = nwords - 1; | |
1834 | ||
1835 | /* Initiate FIS transmission mode */ | |
1836 | old_ifctl = readl(port_mmio + SATA_IFCTL_OFS); | |
1837 | ifctl = 0x100 | (old_ifctl & 0xf); | |
1838 | writelfl(ifctl, port_mmio + SATA_IFCTL_OFS); | |
1839 | ||
1840 | /* Send all words of the FIS except for the final word */ | |
1841 | for (i = 0; i < final_word; ++i) | |
1842 | writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS); | |
1843 | ||
1844 | /* Flag end-of-transmission, and then send the final word */ | |
1845 | writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS); | |
1846 | writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS); | |
1847 | ||
1848 | /* | |
1849 | * Wait for FIS transmission to complete. | |
1850 | * This typically takes just a single iteration. | |
1851 | */ | |
1852 | do { | |
1853 | ifstat = readl(port_mmio + SATA_IFSTAT_OFS); | |
1854 | } while (!(ifstat & 0x1000) && --timeout); | |
1855 | ||
1856 | /* Restore original port configuration */ | |
1857 | writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS); | |
1858 | ||
1859 | /* See if it worked */ | |
1860 | if ((ifstat & 0x3000) != 0x1000) { | |
1861 | ata_port_printk(ap, KERN_WARNING, | |
1862 | "%s transmission error, ifstat=%08x\n", | |
1863 | __func__, ifstat); | |
1864 | return AC_ERR_OTHER; | |
1865 | } | |
1866 | return 0; | |
1867 | } | |
1868 | ||
1869 | /** | |
1870 | * mv_qc_issue_fis - Issue a command directly as a FIS | |
1871 | * @qc: queued command to start | |
1872 | * | |
1873 | * Note that the ATA shadow registers are not updated | |
1874 | * after command issue, so the device will appear "READY" | |
1875 | * if polled, even while it is BUSY processing the command. | |
1876 | * | |
1877 | * So we use a status hook to fake ATA_BUSY until the drive changes state. | |
1878 | * | |
1879 | * Note: we don't get updated shadow regs on *completion* | |
1880 | * of non-data commands. So avoid sending them via this function, | |
1881 | * as they will appear to have completed immediately. | |
1882 | * | |
1883 | * GEN_IIE has special registers that we could get the result tf from, | |
1884 | * but earlier chipsets do not. For now, we ignore those registers. | |
1885 | */ | |
1886 | static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) | |
1887 | { | |
1888 | struct ata_port *ap = qc->ap; | |
1889 | struct mv_port_priv *pp = ap->private_data; | |
1890 | struct ata_link *link = qc->dev->link; | |
1891 | u32 fis[5]; | |
1892 | int err = 0; | |
1893 | ||
1894 | ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); | |
1895 | err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0])); | |
1896 | if (err) | |
1897 | return err; | |
1898 | ||
1899 | switch (qc->tf.protocol) { | |
1900 | case ATAPI_PROT_PIO: | |
1901 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; | |
1902 | /* fall through */ | |
1903 | case ATAPI_PROT_NODATA: | |
1904 | ap->hsm_task_state = HSM_ST_FIRST; | |
1905 | break; | |
1906 | case ATA_PROT_PIO: | |
1907 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; | |
1908 | if (qc->tf.flags & ATA_TFLAG_WRITE) | |
1909 | ap->hsm_task_state = HSM_ST_FIRST; | |
1910 | else | |
1911 | ap->hsm_task_state = HSM_ST; | |
1912 | break; | |
1913 | default: | |
1914 | ap->hsm_task_state = HSM_ST_LAST; | |
1915 | break; | |
1916 | } | |
1917 | ||
1918 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
1919 | ata_pio_queue_task(ap, qc, 0); | |
1920 | return 0; | |
1921 | } | |
1922 | ||
05b308e1 BR |
1923 | /** |
1924 | * mv_qc_issue - Initiate a command to the host | |
1925 | * @qc: queued command to start | |
1926 | * | |
1927 | * This routine simply redirects to the general purpose routine | |
1928 | * if command is not DMA. Else, it sanity checks our local | |
1929 | * caches of the request producer/consumer indices then enables | |
1930 | * DMA and bumps the request producer index. | |
1931 | * | |
1932 | * LOCKING: | |
1933 | * Inherited from caller. | |
1934 | */ | |
9a3d9eb0 | 1935 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) |
31961943 | 1936 | { |
f48765cc | 1937 | static int limit_warnings = 10; |
c5d3e45a JG |
1938 | struct ata_port *ap = qc->ap; |
1939 | void __iomem *port_mmio = mv_ap_base(ap); | |
1940 | struct mv_port_priv *pp = ap->private_data; | |
bdd4ddde | 1941 | u32 in_index; |
42ed893d | 1942 | unsigned int port_irqs; |
f48765cc | 1943 | |
d16ab3f6 ML |
1944 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ |
1945 | ||
f48765cc ML |
1946 | switch (qc->tf.protocol) { |
1947 | case ATA_PROT_DMA: | |
1948 | case ATA_PROT_NCQ: | |
1949 | mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); | |
1950 | pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; | |
1951 | in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; | |
1952 | ||
1953 | /* Write the request in pointer to kick the EDMA to life */ | |
1954 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, | |
1955 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | |
1956 | return 0; | |
31961943 | 1957 | |
f48765cc | 1958 | case ATA_PROT_PIO: |
c6112bd8 ML |
1959 | /* |
1960 | * Errata SATA#16, SATA#24: warn if multiple DRQs expected. | |
1961 | * | |
1962 | * Someday, we might implement special polling workarounds | |
1963 | * for these, but it all seems rather unnecessary since we | |
1964 | * normally use only DMA for commands which transfer more | |
1965 | * than a single block of data. | |
1966 | * | |
1967 | * Much of the time, this could just work regardless. | |
1968 | * So for now, just log the incident, and allow the attempt. | |
1969 | */ | |
c7843e8f | 1970 | if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { |
c6112bd8 ML |
1971 | --limit_warnings; |
1972 | ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME | |
1973 | ": attempting PIO w/multiple DRQ: " | |
1974 | "this may fail due to h/w errata\n"); | |
1975 | } | |
f48765cc | 1976 | /* drop through */ |
42ed893d | 1977 | case ATA_PROT_NODATA: |
f48765cc | 1978 | case ATAPI_PROT_PIO: |
42ed893d ML |
1979 | case ATAPI_PROT_NODATA: |
1980 | if (ap->flags & ATA_FLAG_PIO_POLLING) | |
1981 | qc->tf.flags |= ATA_TFLAG_POLLING; | |
1982 | break; | |
31961943 | 1983 | } |
42ed893d ML |
1984 | |
1985 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
1986 | port_irqs = ERR_IRQ; /* mask device interrupt when polling */ | |
1987 | else | |
1988 | port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ | |
1989 | ||
1990 | /* | |
1991 | * We're about to send a non-EDMA capable command to the | |
1992 | * port. Turn off EDMA so there won't be problems accessing | |
1993 | * shadow block, etc registers. | |
1994 | */ | |
1995 | mv_stop_edma(ap); | |
1996 | mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); | |
1997 | mv_pmp_select(ap, qc->dev->link->pmp); | |
70f8b79c ML |
1998 | |
1999 | if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { | |
2000 | struct mv_host_priv *hpriv = ap->host->private_data; | |
2001 | /* | |
2002 | * Workaround for 88SX60x1 FEr SATA#25 (part 2). | |
2003 | * | |
2004 | * After any NCQ error, the READ_LOG_EXT command | |
2005 | * from libata-eh *must* use mv_qc_issue_fis(). | |
2006 | * Otherwise it might fail, due to chip errata. | |
2007 | * | |
2008 | * Rather than special-case it, we'll just *always* | |
2009 | * use this method here for READ_LOG_EXT, making for | |
2010 | * easier testing. | |
2011 | */ | |
2012 | if (IS_GEN_II(hpriv)) | |
2013 | return mv_qc_issue_fis(qc); | |
2014 | } | |
42ed893d | 2015 | return ata_sff_qc_issue(qc); |
31961943 BR |
2016 | } |
2017 | ||
8f767f8a ML |
2018 | static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) |
2019 | { | |
2020 | struct mv_port_priv *pp = ap->private_data; | |
2021 | struct ata_queued_cmd *qc; | |
2022 | ||
2023 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) | |
2024 | return NULL; | |
2025 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | |
95db5051 ML |
2026 | if (qc) { |
2027 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
2028 | qc = NULL; | |
2029 | else if (!(qc->flags & ATA_QCFLAG_ACTIVE)) | |
2030 | qc = NULL; | |
2031 | } | |
8f767f8a ML |
2032 | return qc; |
2033 | } | |
2034 | ||
29d187bb ML |
2035 | static void mv_pmp_error_handler(struct ata_port *ap) |
2036 | { | |
2037 | unsigned int pmp, pmp_map; | |
2038 | struct mv_port_priv *pp = ap->private_data; | |
2039 | ||
2040 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { | |
2041 | /* | |
2042 | * Perform NCQ error analysis on failed PMPs | |
2043 | * before we freeze the port entirely. | |
2044 | * | |
2045 | * The failed PMPs are marked earlier by mv_pmp_eh_prep(). | |
2046 | */ | |
2047 | pmp_map = pp->delayed_eh_pmp_map; | |
2048 | pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; | |
2049 | for (pmp = 0; pmp_map != 0; pmp++) { | |
2050 | unsigned int this_pmp = (1 << pmp); | |
2051 | if (pmp_map & this_pmp) { | |
2052 | struct ata_link *link = &ap->pmp_link[pmp]; | |
2053 | pmp_map &= ~this_pmp; | |
2054 | ata_eh_analyze_ncq_error(link); | |
2055 | } | |
2056 | } | |
2057 | ata_port_freeze(ap); | |
2058 | } | |
2059 | sata_pmp_error_handler(ap); | |
2060 | } | |
2061 | ||
4c299ca3 ML |
2062 | static unsigned int mv_get_err_pmp_map(struct ata_port *ap) |
2063 | { | |
2064 | void __iomem *port_mmio = mv_ap_base(ap); | |
2065 | ||
2066 | return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; | |
2067 | } | |
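/*
 * As used by the FBS/NCQ error handling below, the upper 16 bits of
 * SATA_TESTCTL_OFS are treated as a bitmap of PMP links that reported
 * a device error: bit N set means ap->pmp_link[N] needs EH attention
 * (consumed by mv_pmp_eh_prep()).
 */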
2068 | ||
4c299ca3 ML |
2069 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) |
2070 | { | |
2071 | struct ata_eh_info *ehi; | |
2072 | unsigned int pmp; | |
2073 | ||
2074 | /* | |
2075 | * Initialize EH info for PMPs which saw device errors | |
2076 | */ | |
2077 | ehi = &ap->link.eh_info; | |
2078 | for (pmp = 0; pmp_map != 0; pmp++) { | |
2079 | unsigned int this_pmp = (1 << pmp); | |
2080 | if (pmp_map & this_pmp) { | |
2081 | struct ata_link *link = &ap->pmp_link[pmp]; | |
2082 | ||
2083 | pmp_map &= ~this_pmp; | |
2084 | ehi = &link->eh_info; | |
2085 | ata_ehi_clear_desc(ehi); | |
2086 | ata_ehi_push_desc(ehi, "dev err"); | |
2087 | ehi->err_mask |= AC_ERR_DEV; | |
2088 | ehi->action |= ATA_EH_RESET; | |
2089 | ata_link_abort(link); | |
2090 | } | |
2091 | } | |
2092 | } | |
2093 | ||
06aaca3f ML |
2094 | static int mv_req_q_empty(struct ata_port *ap) |
2095 | { | |
2096 | void __iomem *port_mmio = mv_ap_base(ap); | |
2097 | u32 in_ptr, out_ptr; | |
2098 | ||
2099 | in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS) | |
2100 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | |
2101 | out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) | |
2102 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | |
2103 | return (in_ptr == out_ptr); /* 1 == queue_is_empty */ | |
2104 | } | |
2105 | ||
4c299ca3 ML |
2106 | static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) |
2107 | { | |
2108 | struct mv_port_priv *pp = ap->private_data; | |
2109 | int failed_links; | |
2110 | unsigned int old_map, new_map; | |
2111 | ||
2112 | /* | |
2113 | * Device error during FBS+NCQ operation: | |
2114 | * | |
2115 | * Set a port flag to prevent further I/O being enqueued. | |
2116 | * Leave the EDMA running to drain outstanding commands from this port. | |
2117 | * Perform the post-mortem/EH only when all responses are complete. | |
2118 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). | |
2119 | */ | |
2120 | if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { | |
2121 | pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; | |
2122 | pp->delayed_eh_pmp_map = 0; | |
2123 | } | |
2124 | old_map = pp->delayed_eh_pmp_map; | |
2125 | new_map = old_map | mv_get_err_pmp_map(ap); | |
2126 | ||
2127 | if (old_map != new_map) { | |
2128 | pp->delayed_eh_pmp_map = new_map; | |
2129 | mv_pmp_eh_prep(ap, new_map & ~old_map); | |
2130 | } | |
c46938cc | 2131 | failed_links = hweight16(new_map); |
4c299ca3 ML |
2132 | |
2133 | ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " | |
2134 | "failed_links=%d nr_active_links=%d\n", | |
2135 | __func__, pp->delayed_eh_pmp_map, | |
2136 | ap->qc_active, failed_links, | |
2137 | ap->nr_active_links); | |
2138 | ||
06aaca3f | 2139 | if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { |
4c299ca3 ML |
2140 | mv_process_crpb_entries(ap, pp); |
2141 | mv_stop_edma(ap); | |
2142 | mv_eh_freeze(ap); | |
2143 | ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); | |
2144 | return 1; /* handled */ | |
2145 | } | |
2146 | ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); | |
2147 | return 1; /* handled */ | |
2148 | } | |
2149 | ||
2150 | static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) | |
2151 | { | |
2152 | /* | |
2153 | * Possible future enhancement: | |
2154 | * | |
2155 | * FBS+non-NCQ operation is not yet implemented. | |
2156 | * See related notes in mv_edma_cfg(). | |
2157 | * | |
2158 | * Device error during FBS+non-NCQ operation: | |
2159 | * | |
2160 | * We need to snapshot the shadow registers for each failed command. | |
2161 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). | |
2162 | */ | |
2163 | return 0; /* not handled */ | |
2164 | } | |
2165 | ||
2166 | static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) | |
2167 | { | |
2168 | struct mv_port_priv *pp = ap->private_data; | |
2169 | ||
2170 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | |
2171 | return 0; /* EDMA was not active: not handled */ | |
2172 | if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) | |
2173 | return 0; /* FBS was not active: not handled */ | |
2174 | ||
2175 | if (!(edma_err_cause & EDMA_ERR_DEV)) | |
2176 | return 0; /* non DEV error: not handled */ | |
2177 | edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; | |
2178 | if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) | |
2179 | return 0; /* other problems: not handled */ | |
2180 | ||
2181 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { | |
2182 | /* | |
2183 | * EDMA should NOT have self-disabled for this case. | |
2184 | * If it did, then something is wrong elsewhere, | |
2185 | * and we cannot handle it here. | |
2186 | */ | |
2187 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | |
2188 | ata_port_printk(ap, KERN_WARNING, | |
2189 | "%s: err_cause=0x%x pp_flags=0x%x\n", | |
2190 | __func__, edma_err_cause, pp->pp_flags); | |
2191 | return 0; /* not handled */ | |
2192 | } | |
2193 | return mv_handle_fbs_ncq_dev_err(ap); | |
2194 | } else { | |
2195 | /* | |
2196 | * EDMA should have self-disabled for this case. | |
2197 | * If it did not, then something is wrong elsewhere, | |
2198 | * and we cannot handle it here. | |
2199 | */ | |
2200 | if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { | |
2201 | ata_port_printk(ap, KERN_WARNING, | |
2202 | "%s: err_cause=0x%x pp_flags=0x%x\n", | |
2203 | __func__, edma_err_cause, pp->pp_flags); | |
2204 | return 0; /* not handled */ | |
2205 | } | |
2206 | return mv_handle_fbs_non_ncq_dev_err(ap); | |
2207 | } | |
2208 | return 0; /* not handled */ | |
2209 | } | |
2210 | ||
a9010329 | 2211 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) |
8f767f8a | 2212 | { |
8f767f8a | 2213 | struct ata_eh_info *ehi = &ap->link.eh_info; |
a9010329 | 2214 | char *when = "idle"; |
8f767f8a | 2215 | |
8f767f8a | 2216 | ata_ehi_clear_desc(ehi); |
a9010329 ML |
2217 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { |
2218 | when = "disabled"; | |
2219 | } else if (edma_was_enabled) { | |
2220 | when = "EDMA enabled"; | |
8f767f8a ML |
2221 | } else { |
2222 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); | |
2223 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | |
a9010329 | 2224 | when = "polling"; |
8f767f8a | 2225 | } |
a9010329 | 2226 | ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); |
8f767f8a ML |
2227 | ehi->err_mask |= AC_ERR_OTHER; |
2228 | ehi->action |= ATA_EH_RESET; | |
2229 | ata_port_freeze(ap); | |
2230 | } | |
2231 | ||
05b308e1 BR |
2232 | /** |
2233 | * mv_err_intr - Handle error interrupts on the port | |
2234 | * @ap: ATA channel to manipulate | |
2235 | * | |
8d07379d ML |
2236 | * Most cases require a full reset of the chip's state machine, |
2237 | * which also performs a COMRESET. | |
2238 | * Also, if the port disabled DMA, update our cached copy to match. | |
05b308e1 BR |
2239 | * |
2240 | * LOCKING: | |
2241 | * Inherited from caller. | |
2242 | */ | |
37b9046a | 2243 | static void mv_err_intr(struct ata_port *ap) |
31961943 BR |
2244 | { |
2245 | void __iomem *port_mmio = mv_ap_base(ap); | |
bdd4ddde | 2246 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
e4006077 | 2247 | u32 fis_cause = 0; |
bdd4ddde JG |
2248 | struct mv_port_priv *pp = ap->private_data; |
2249 | struct mv_host_priv *hpriv = ap->host->private_data; | |
bdd4ddde | 2250 | unsigned int action = 0, err_mask = 0; |
9af5c9c9 | 2251 | struct ata_eh_info *ehi = &ap->link.eh_info; |
37b9046a ML |
2252 | struct ata_queued_cmd *qc; |
2253 | int abort = 0; | |
20f733e7 | 2254 | |
8d07379d | 2255 | /* |
37b9046a | 2256 | * Read and clear the SError and err_cause bits. |
e4006077 ML |
2257 | * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear |
2258 | * the FIS_IRQ_CAUSE register before clearing edma_err_cause. | |
8d07379d | 2259 | */ |
37b9046a ML |
2260 | sata_scr_read(&ap->link, SCR_ERROR, &serr); |
2261 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); | |
2262 | ||
bdd4ddde | 2263 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
e4006077 ML |
2264 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
2265 | fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | |
2266 | writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | |
2267 | } | |
8d07379d | 2268 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
bdd4ddde | 2269 | |
4c299ca3 ML |
2270 | if (edma_err_cause & EDMA_ERR_DEV) { |
2271 | /* | |
2272 | * Device errors during FIS-based switching operation | |
2273 | * require special handling. | |
2274 | */ | |
2275 | if (mv_handle_dev_err(ap, edma_err_cause)) | |
2276 | return; | |
2277 | } | |
2278 | ||
37b9046a ML |
2279 | qc = mv_get_active_qc(ap); |
2280 | ata_ehi_clear_desc(ehi); | |
2281 | ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", | |
2282 | edma_err_cause, pp->pp_flags); | |
e4006077 | 2283 | |
c443c500 | 2284 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
e4006077 | 2285 | ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); |
c443c500 ML |
2286 | if (fis_cause & SATA_FIS_IRQ_AN) { |
2287 | u32 ec = edma_err_cause & | |
2288 | ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); | |
2289 | sata_async_notification(ap); | |
2290 | if (!ec) | |
2291 | return; /* Just an AN; no need for the nukes */ | |
2292 | ata_ehi_push_desc(ehi, "SDB notify"); | |
2293 | } | |
2294 | } | |
bdd4ddde | 2295 | /* |
352fab70 | 2296 | * All generations share these EDMA error cause bits: |
bdd4ddde | 2297 | */ |
37b9046a | 2298 | if (edma_err_cause & EDMA_ERR_DEV) { |
bdd4ddde | 2299 | err_mask |= AC_ERR_DEV; |
37b9046a ML |
2300 | action |= ATA_EH_RESET; |
2301 | ata_ehi_push_desc(ehi, "dev error"); | |
2302 | } | |
bdd4ddde | 2303 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | |
6c1153e0 | 2304 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
bdd4ddde JG |
2305 | EDMA_ERR_INTRL_PAR)) { |
2306 | err_mask |= AC_ERR_ATA_BUS; | |
cf480626 | 2307 | action |= ATA_EH_RESET; |
b64bbc39 | 2308 | ata_ehi_push_desc(ehi, "parity error"); |
bdd4ddde JG |
2309 | } |
2310 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { | |
2311 | ata_ehi_hotplugged(ehi); | |
2312 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? | |
b64bbc39 | 2313 | "dev disconnect" : "dev connect"); |
cf480626 | 2314 | action |= ATA_EH_RESET; |
bdd4ddde JG |
2315 | } |
2316 | ||
352fab70 ML |
2317 | /* |
2318 | * Gen-I has a different SELF_DIS bit, | |
2319 | * different FREEZE bits, and no SERR bit: | |
2320 | */ | |
ee9ccdf7 | 2321 | if (IS_GEN_I(hpriv)) { |
bdd4ddde | 2322 | eh_freeze_mask = EDMA_EH_FREEZE_5; |
bdd4ddde | 2323 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { |
bdd4ddde | 2324 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
b64bbc39 | 2325 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
bdd4ddde JG |
2326 | } |
2327 | } else { | |
2328 | eh_freeze_mask = EDMA_EH_FREEZE; | |
bdd4ddde | 2329 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { |
bdd4ddde | 2330 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
b64bbc39 | 2331 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
bdd4ddde | 2332 | } |
bdd4ddde | 2333 | if (edma_err_cause & EDMA_ERR_SERR) { |
8d07379d ML |
2334 | ata_ehi_push_desc(ehi, "SError=%08x", serr); |
2335 | err_mask |= AC_ERR_ATA_BUS; | |
cf480626 | 2336 | action |= ATA_EH_RESET; |
bdd4ddde | 2337 | } |
afb0edd9 | 2338 | } |
20f733e7 | 2339 | |
bdd4ddde JG |
2340 | if (!err_mask) { |
2341 | err_mask = AC_ERR_OTHER; | |
cf480626 | 2342 | action |= ATA_EH_RESET; |
bdd4ddde JG |
2343 | } |
2344 | ||
2345 | ehi->serror |= serr; | |
2346 | ehi->action |= action; | |
2347 | ||
2348 | if (qc) | |
2349 | qc->err_mask |= err_mask; | |
2350 | else | |
2351 | ehi->err_mask |= err_mask; | |
2352 | ||
37b9046a ML |
2353 | if (err_mask == AC_ERR_DEV) { |
2354 | /* | |
2355 | * Cannot do ata_port_freeze() here, | |
2356 | * because it would kill PIO access, | |
2357 | * which is needed for further diagnosis. | |
2358 | */ | |
2359 | mv_eh_freeze(ap); | |
2360 | abort = 1; | |
2361 | } else if (edma_err_cause & eh_freeze_mask) { | |
2362 | /* | |
2363 | * Note to self: ata_port_freeze() calls ata_port_abort() | |
2364 | */ | |
bdd4ddde | 2365 | ata_port_freeze(ap); |
37b9046a ML |
2366 | } else { |
2367 | abort = 1; | |
2368 | } | |
2369 | ||
2370 | if (abort) { | |
2371 | if (qc) | |
2372 | ata_link_abort(qc->dev->link); | |
2373 | else | |
2374 | ata_port_abort(ap); | |
2375 | } | |
bdd4ddde JG |
2376 | } |
2377 | ||
fcfb1f77 ML |
2378 | static void mv_process_crpb_response(struct ata_port *ap, |
2379 | struct mv_crpb *response, unsigned int tag, int ncq_enabled) | |
2380 | { | |
2381 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); | |
2382 | ||
2383 | if (qc) { | |
2384 | u8 ata_status; | |
2385 | u16 edma_status = le16_to_cpu(response->flags); | |
2386 | /* | |
2387 | * edma_status from a response queue entry: | |
2388 | * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). | |
2389 | * MSB is saved ATA status from command completion. | |
2390 | */ | |
2391 | if (!ncq_enabled) { | |
2392 | u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; | |
2393 | if (err_cause) { | |
2394 | /* | |
2395 | * Error will be seen/handled by mv_err_intr(). | |
2396 | * So do nothing at all here. | |
2397 | */ | |
2398 | return; | |
2399 | } | |
2400 | } | |
2401 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; | |
37b9046a ML |
2402 | if (!ac_err_mask(ata_status)) |
2403 | ata_qc_complete(qc); | |
2404 | /* else: leave it for mv_err_intr() */ | |
fcfb1f77 ML |
2405 | } else { |
2406 | ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", | |
2407 | __func__, tag); | |
2408 | } | |
2409 | } | |
2410 | ||
2411 | static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) | |
bdd4ddde JG |
2412 | { |
2413 | void __iomem *port_mmio = mv_ap_base(ap); | |
2414 | struct mv_host_priv *hpriv = ap->host->private_data; | |
fcfb1f77 | 2415 | u32 in_index; |
bdd4ddde | 2416 | bool work_done = false; |
fcfb1f77 | 2417 | int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); |
bdd4ddde | 2418 | |
fcfb1f77 | 2419 | /* Get the hardware queue position index */ |
bdd4ddde JG |
2420 | in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) |
2421 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | |
2422 | ||
fcfb1f77 ML |
2423 | /* Process new responses received since the last time we looked */ |
2424 | while (in_index != pp->resp_idx) { | |
6c1153e0 | 2425 | unsigned int tag; |
fcfb1f77 | 2426 | struct mv_crpb *response = &pp->crpb[pp->resp_idx]; |
bdd4ddde | 2427 | |
fcfb1f77 | 2428 | pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; |
bdd4ddde | 2429 | |
fcfb1f77 ML |
2430 | if (IS_GEN_I(hpriv)) { |
2431 | /* 50xx: no NCQ, only one command active at a time */ | |
9af5c9c9 | 2432 | tag = ap->link.active_tag; |
fcfb1f77 ML |
2433 | } else { |
2434 | /* Gen II/IIE: get command tag from CRPB entry */ | |
2435 | tag = le16_to_cpu(response->id) & 0x1f; | |
bdd4ddde | 2436 | } |
fcfb1f77 | 2437 | mv_process_crpb_response(ap, response, tag, ncq_enabled); |
bdd4ddde | 2438 | work_done = true; |
bdd4ddde JG |
2439 | } |
2440 | ||
352fab70 | 2441 | /* Update the software queue position index in hardware */ |
bdd4ddde JG |
2442 | if (work_done) |
2443 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | | |
fcfb1f77 | 2444 | (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), |
bdd4ddde | 2445 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
20f733e7 BR |
2446 | } |
2447 | ||
a9010329 ML |
2448 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) |
2449 | { | |
2450 | struct mv_port_priv *pp; | |
2451 | int edma_was_enabled; | |
2452 | ||
2453 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { | |
2454 | mv_unexpected_intr(ap, 0); | |
2455 | return; | |
2456 | } | |
2457 | /* | |
2458 | * Grab a snapshot of the EDMA_EN flag setting, | |
2459 | * so that we have a consistent view for this port, | |
2460 | * even if one of the routines we call changes it. |
2461 | */ | |
2462 | pp = ap->private_data; | |
2463 | edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | |
2464 | /* | |
2465 | * Process completed CRPB response(s) before other events. | |
2466 | */ | |
2467 | if (edma_was_enabled && (port_cause & DONE_IRQ)) { | |
2468 | mv_process_crpb_entries(ap, pp); | |
4c299ca3 ML |
2469 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) |
2470 | mv_handle_fbs_ncq_dev_err(ap); | |
a9010329 ML |
2471 | } |
2472 | /* | |
2473 | * Handle chip-reported errors, or continue on to handle PIO. | |
2474 | */ | |
2475 | if (unlikely(port_cause & ERR_IRQ)) { | |
2476 | mv_err_intr(ap); | |
2477 | } else if (!edma_was_enabled) { | |
2478 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | |
2479 | if (qc) | |
2480 | ata_sff_host_intr(ap, qc); | |
2481 | else | |
2482 | mv_unexpected_intr(ap, edma_was_enabled); | |
2483 | } | |
2484 | } | |
2485 | ||
05b308e1 BR |
2486 | /** |
2487 | * mv_host_intr - Handle all interrupts on the given host controller | |
cca3974e | 2488 | * @host: host specific structure |
7368f919 | 2489 | * @main_irq_cause: Main interrupt cause register for the chip. |
05b308e1 BR |
2490 | * |
2491 | * LOCKING: | |
2492 | * Inherited from caller. | |
2493 | */ | |
7368f919 | 2494 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) |
20f733e7 | 2495 | { |
f351b2d6 | 2496 | struct mv_host_priv *hpriv = host->private_data; |
eabd5eb1 | 2497 | void __iomem *mmio = hpriv->base, *hc_mmio; |
a3718c1f | 2498 | unsigned int handled = 0, port; |
20f733e7 | 2499 | |
a3718c1f | 2500 | for (port = 0; port < hpriv->n_ports; port++) { |
cca3974e | 2501 | struct ata_port *ap = host->ports[port]; |
eabd5eb1 ML |
2502 | unsigned int p, shift, hardport, port_cause; |
2503 | ||
a3718c1f | 2504 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); |
a3718c1f | 2505 | /* |
eabd5eb1 ML |
2506 | * Each hc within the host has its own hc_irq_cause register, |
2507 | * where the interrupting ports bits get ack'd. | |
a3718c1f | 2508 | */ |
eabd5eb1 ML |
2509 | if (hardport == 0) { /* first port on this hc ? */ |
2510 | u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; | |
2511 | u32 port_mask, ack_irqs; | |
2512 | /* | |
2513 | * Skip this entire hc if nothing pending for any ports | |
2514 | */ | |
2515 | if (!hc_cause) { | |
2516 | port += MV_PORTS_PER_HC - 1; | |
2517 | continue; | |
2518 | } | |
2519 | /* | |
2520 | * We don't need/want to read the hc_irq_cause register, | |
2521 | * because doing so hurts performance, and | |
2522 | * main_irq_cause already gives us everything we need. | |
2523 | * | |
2524 | * But we do have to *write* to the hc_irq_cause to ack | |
2525 | * the ports that we are handling this time through. | |
2526 | * | |
2527 | * This requires that we create a bitmap for those | |
2528 | * ports which interrupted us, and use that bitmap | |
2529 | * to ack (only) those ports via hc_irq_cause. | |
2530 | */ | |
2531 | ack_irqs = 0; | |
2532 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { | |
2533 | if ((port + p) >= hpriv->n_ports) | |
2534 | break; | |
2535 | port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); | |
2536 | if (hc_cause & port_mask) | |
2537 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; | |
2538 | } | |
a3718c1f | 2539 | hc_mmio = mv_hc_base_from_port(mmio, port); |
eabd5eb1 | 2540 | writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); |
a3718c1f ML |
2541 | handled = 1; |
2542 | } | |
8f767f8a | 2543 | /* |
a9010329 | 2544 | * Handle interrupts signalled for this port: |
8f767f8a | 2545 | */ |
a9010329 ML |
2546 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); |
2547 | if (port_cause) | |
2548 | mv_port_intr(ap, port_cause); | |
20f733e7 | 2549 | } |
a3718c1f | 2550 | return handled; |
20f733e7 BR |
2551 | } |
2552 | ||
a3718c1f | 2553 | static int mv_pci_error(struct ata_host *host, void __iomem *mmio) |
bdd4ddde | 2554 | { |
02a121da | 2555 | struct mv_host_priv *hpriv = host->private_data; |
bdd4ddde JG |
2556 | struct ata_port *ap; |
2557 | struct ata_queued_cmd *qc; | |
2558 | struct ata_eh_info *ehi; | |
2559 | unsigned int i, err_mask, printed = 0; | |
2560 | u32 err_cause; | |
2561 | ||
02a121da | 2562 | err_cause = readl(mmio + hpriv->irq_cause_ofs); |
bdd4ddde JG |
2563 | |
2564 | dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", | |
2565 | err_cause); | |
2566 | ||
2567 | DPRINTK("All regs @ PCI error\n"); | |
2568 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); | |
2569 | ||
02a121da | 2570 | writelfl(0, mmio + hpriv->irq_cause_ofs); |
bdd4ddde JG |
2571 | |
2572 | for (i = 0; i < host->n_ports; i++) { | |
2573 | ap = host->ports[i]; | |
936fd732 | 2574 | if (!ata_link_offline(&ap->link)) { |
9af5c9c9 | 2575 | ehi = &ap->link.eh_info; |
bdd4ddde JG |
2576 | ata_ehi_clear_desc(ehi); |
2577 | if (!printed++) | |
2578 | ata_ehi_push_desc(ehi, | |
2579 | "PCI err cause 0x%08x", err_cause); | |
2580 | err_mask = AC_ERR_HOST_BUS; | |
cf480626 | 2581 | ehi->action = ATA_EH_RESET; |
9af5c9c9 | 2582 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
bdd4ddde JG |
2583 | if (qc) |
2584 | qc->err_mask |= err_mask; | |
2585 | else | |
2586 | ehi->err_mask |= err_mask; | |
2587 | ||
2588 | ata_port_freeze(ap); | |
2589 | } | |
2590 | } | |
a3718c1f | 2591 | return 1; /* handled */ |
bdd4ddde JG |
2592 | } |
2593 | ||
05b308e1 | 2594 | /** |
c5d3e45a | 2595 | * mv_interrupt - Main interrupt event handler |
05b308e1 BR |
2596 | * @irq: unused |
2597 | * @dev_instance: private data; in this case the host structure | |
05b308e1 BR |
2598 | * |
2599 | * Read the read only register to determine if any host | |
2600 | * controllers have pending interrupts. If so, call lower level | |
2601 | * routine to handle. Also check for PCI errors which are only | |
2602 | * reported here. | |
2603 | * | |
8b260248 | 2604 | * LOCKING: |
cca3974e | 2605 | * This routine holds the host lock while processing pending |
05b308e1 BR |
2606 | * interrupts. |
2607 | */ | |
7d12e780 | 2608 | static irqreturn_t mv_interrupt(int irq, void *dev_instance) |
20f733e7 | 2609 | { |
cca3974e | 2610 | struct ata_host *host = dev_instance; |
f351b2d6 | 2611 | struct mv_host_priv *hpriv = host->private_data; |
a3718c1f | 2612 | unsigned int handled = 0; |
6d3c30ef | 2613 | int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; |
96e2c487 | 2614 | u32 main_irq_cause, pending_irqs; |
20f733e7 | 2615 | |
646a4da5 | 2616 | spin_lock(&host->lock); |
6d3c30ef ML |
2617 | |
2618 | /* for MSI: block new interrupts while in here */ | |
2619 | if (using_msi) | |
2620 | writel(0, hpriv->main_irq_mask_addr); | |
2621 | ||
7368f919 | 2622 | main_irq_cause = readl(hpriv->main_irq_cause_addr); |
96e2c487 | 2623 | pending_irqs = main_irq_cause & hpriv->main_irq_mask; |
352fab70 ML |
2624 | /* |
2625 | * Deal with cases where we either have nothing pending, or have read | |
2626 | * a bogus register value which can indicate HW removal or PCI fault. | |
20f733e7 | 2627 | */ |
a44253d2 | 2628 | if (pending_irqs && main_irq_cause != 0xffffffffU) { |
1f398472 | 2629 | if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) |
a3718c1f ML |
2630 | handled = mv_pci_error(host, hpriv->base); |
2631 | else | |
a44253d2 | 2632 | handled = mv_host_intr(host, pending_irqs); |
bdd4ddde | 2633 | } |
6d3c30ef ML |
2634 | |
2635 | /* for MSI: unmask; interrupt cause bits will retrigger now */ | |
2636 | if (using_msi) | |
2637 | writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr); | |
2638 | ||
9d51af7b ML |
2639 | spin_unlock(&host->lock); |
2640 | ||
20f733e7 BR |
2641 | return IRQ_RETVAL(handled); |
2642 | } | |
2643 | ||
c9d39130 JG |
2644 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) |
2645 | { | |
2646 | unsigned int ofs; | |
2647 | ||
2648 | switch (sc_reg_in) { | |
2649 | case SCR_STATUS: | |
2650 | case SCR_ERROR: | |
2651 | case SCR_CONTROL: | |
2652 | ofs = sc_reg_in * sizeof(u32); | |
2653 | break; | |
2654 | default: | |
2655 | ofs = 0xffffffffU; | |
2656 | break; | |
2657 | } | |
2658 | return ofs; | |
2659 | } | |
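/*
 * i.e. on these 5xxx parts, SCR_STATUS, SCR_ERROR and SCR_CONTROL live in
 * three consecutive 32-bit slots (sc_reg_in * 4) of the per-port PHY
 * window; any other SCR is reported back as 0xffffffffU and rejected by
 * mv5_scr_read()/mv5_scr_write().
 */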
2660 | ||
82ef04fb | 2661 | static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) |
c9d39130 | 2662 | { |
82ef04fb | 2663 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
f351b2d6 | 2664 | void __iomem *mmio = hpriv->base; |
82ef04fb | 2665 | void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); |
c9d39130 JG |
2666 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
2667 | ||
da3dbb17 TH |
2668 | if (ofs != 0xffffffffU) { |
2669 | *val = readl(addr + ofs); | |
2670 | return 0; | |
2671 | } else | |
2672 | return -EINVAL; | |
c9d39130 JG |
2673 | } |
2674 | ||
82ef04fb | 2675 | static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
c9d39130 | 2676 | { |
82ef04fb | 2677 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
f351b2d6 | 2678 | void __iomem *mmio = hpriv->base; |
82ef04fb | 2679 | void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); |
c9d39130 JG |
2680 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
2681 | ||
da3dbb17 | 2682 | if (ofs != 0xffffffffU) { |
0d5ff566 | 2683 | writelfl(val, addr + ofs); |
da3dbb17 TH |
2684 | return 0; |
2685 | } else | |
2686 | return -EINVAL; | |
c9d39130 JG |
2687 | } |
2688 | ||
7bb3c529 | 2689 | static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) |
522479fb | 2690 | { |
7bb3c529 | 2691 | struct pci_dev *pdev = to_pci_dev(host->dev); |
522479fb JG |
2692 | int early_5080; |
2693 | ||
44c10138 | 2694 | early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); |
522479fb JG |
2695 | |
2696 | if (!early_5080) { | |
2697 | u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
2698 | tmp |= (1 << 0); | |
2699 | writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
2700 | } | |
2701 | ||
7bb3c529 | 2702 | mv_reset_pci_bus(host, mmio); |
522479fb JG |
2703 | } |
2704 | ||
2705 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |
2706 | { | |
8e7decdb | 2707 | writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); |
522479fb JG |
2708 | } |
2709 | ||
47c2b677 | 2710 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
ba3fe8fb JG |
2711 | void __iomem *mmio) |
2712 | { | |
c9d39130 JG |
2713 | void __iomem *phy_mmio = mv5_phy_base(mmio, idx); |
2714 | u32 tmp; | |
2715 | ||
2716 | tmp = readl(phy_mmio + MV5_PHY_MODE); | |
2717 | ||
2718 | hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ | |
2719 | hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ | |
ba3fe8fb JG |
2720 | } |
2721 | ||
47c2b677 | 2722 | static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
ba3fe8fb | 2723 | { |
522479fb JG |
2724 | u32 tmp; |
2725 | ||
8e7decdb | 2726 | writel(0, mmio + MV_GPIO_PORT_CTL_OFS); |
522479fb JG |
2727 | |
2728 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ | |
2729 | ||
2730 | tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
2731 | tmp |= ~(1 << 0); | |
2732 | writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
ba3fe8fb JG |
2733 | } |
2734 | ||
2a47ce06 JG |
2735 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
2736 | unsigned int port) | |
bca1c4eb | 2737 | { |
c9d39130 JG |
2738 | void __iomem *phy_mmio = mv5_phy_base(mmio, port); |
2739 | const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); | |
2740 | u32 tmp; | |
2741 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); | |
2742 | ||
2743 | if (fix_apm_sq) { | |
8e7decdb | 2744 | tmp = readl(phy_mmio + MV5_LTMODE_OFS); |
c9d39130 | 2745 | tmp |= (1 << 19); |
8e7decdb | 2746 | writel(tmp, phy_mmio + MV5_LTMODE_OFS); |
c9d39130 | 2747 | |
8e7decdb | 2748 | tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); |
c9d39130 JG |
2749 | tmp &= ~0x3; |
2750 | tmp |= 0x1; | |
8e7decdb | 2751 | writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); |
c9d39130 JG |
2752 | } |
2753 | ||
2754 | tmp = readl(phy_mmio + MV5_PHY_MODE); | |
2755 | tmp &= ~mask; | |
2756 | tmp |= hpriv->signal[port].pre; | |
2757 | tmp |= hpriv->signal[port].amps; | |
2758 | writel(tmp, phy_mmio + MV5_PHY_MODE); | |
bca1c4eb JG |
2759 | } |
2760 | ||
c9d39130 JG |
2761 | |
2762 | #undef ZERO | |
2763 | #define ZERO(reg) writel(0, port_mmio + (reg)) | |
2764 | static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |
2765 | unsigned int port) | |
2766 | { | |
2767 | void __iomem *port_mmio = mv_port_base(mmio, port); | |
2768 | ||
e12bef50 | 2769 | mv_reset_channel(hpriv, mmio, port); |
c9d39130 JG |
2770 | |
2771 | ZERO(0x028); /* command */ | |
2772 | writel(0x11f, port_mmio + EDMA_CFG_OFS); | |
2773 | ZERO(0x004); /* timer */ | |
2774 | ZERO(0x008); /* irq err cause */ | |
2775 | ZERO(0x00c); /* irq err mask */ | |
2776 | ZERO(0x010); /* rq bah */ | |
2777 | ZERO(0x014); /* rq inp */ | |
2778 | ZERO(0x018); /* rq outp */ | |
2779 | ZERO(0x01c); /* respq bah */ | |
2780 | ZERO(0x024); /* respq outp */ | |
2781 | ZERO(0x020); /* respq inp */ | |
2782 | ZERO(0x02c); /* test control */ | |
8e7decdb | 2783 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
c9d39130 JG |
2784 | } |
2785 | #undef ZERO | |
2786 | ||
2787 | #define ZERO(reg) writel(0, hc_mmio + (reg)) | |
2788 | static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | |
2789 | unsigned int hc) | |
47c2b677 | 2790 | { |
c9d39130 JG |
2791 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
2792 | u32 tmp; | |
2793 | ||
2794 | ZERO(0x00c); | |
2795 | ZERO(0x010); | |
2796 | ZERO(0x014); | |
2797 | ZERO(0x018); | |
2798 | ||
2799 | tmp = readl(hc_mmio + 0x20); | |
2800 | tmp &= 0x1c1c1c1c; | |
2801 | tmp |= 0x03030303; | |
2802 | writel(tmp, hc_mmio + 0x20); | |
2803 | } | |
2804 | #undef ZERO | |
2805 | ||
2806 | static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | |
2807 | unsigned int n_hc) | |
2808 | { | |
2809 | unsigned int hc, port; | |
2810 | ||
2811 | for (hc = 0; hc < n_hc; hc++) { | |
2812 | for (port = 0; port < MV_PORTS_PER_HC; port++) | |
2813 | mv5_reset_hc_port(hpriv, mmio, | |
2814 | (hc * MV_PORTS_PER_HC) + port); | |
2815 | ||
2816 | mv5_reset_one_hc(hpriv, mmio, hc); | |
2817 | } | |
2818 | ||
2819 | return 0; | |
47c2b677 JG |
2820 | } |
2821 | ||
101ffae2 JG |
2822 | #undef ZERO |
2823 | #define ZERO(reg) writel(0, mmio + (reg)) | |
7bb3c529 | 2824 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) |
101ffae2 | 2825 | { |
02a121da | 2826 | struct mv_host_priv *hpriv = host->private_data; |
101ffae2 JG |
2827 | u32 tmp; |
2828 | ||
8e7decdb | 2829 | tmp = readl(mmio + MV_PCI_MODE_OFS); |
101ffae2 | 2830 | tmp &= 0xff00ffff; |
8e7decdb | 2831 | writel(tmp, mmio + MV_PCI_MODE_OFS); |
101ffae2 JG |
2832 | |
2833 | ZERO(MV_PCI_DISC_TIMER); | |
2834 | ZERO(MV_PCI_MSI_TRIGGER); | |
8e7decdb | 2835 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); |
101ffae2 | 2836 | ZERO(MV_PCI_SERR_MASK); |
02a121da ML |
2837 | ZERO(hpriv->irq_cause_ofs); |
2838 | ZERO(hpriv->irq_mask_ofs); | |
101ffae2 JG |
2839 | ZERO(MV_PCI_ERR_LOW_ADDRESS); |
2840 | ZERO(MV_PCI_ERR_HIGH_ADDRESS); | |
2841 | ZERO(MV_PCI_ERR_ATTRIBUTE); | |
2842 | ZERO(MV_PCI_ERR_COMMAND); | |
2843 | } | |
2844 | #undef ZERO | |
2845 | ||
2846 | static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |
2847 | { | |
2848 | u32 tmp; | |
2849 | ||
2850 | mv5_reset_flash(hpriv, mmio); | |
2851 | ||
8e7decdb | 2852 | tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); |
101ffae2 JG |
2853 | tmp &= 0x3; |
2854 | tmp |= (1 << 5) | (1 << 6); | |
8e7decdb | 2855 | writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); |
101ffae2 JG |
2856 | } |
2857 | ||
2858 | /** | |
2859 | * mv6_reset_hc - Perform the 6xxx global soft reset | |
2860 | * @mmio: base address of the HBA | |
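 * @hpriv: host private data (unused here; kept to match the reset_hc hook
 *	called via hpriv->ops)
 * @n_hc: number of host controllers (likewise unused by this chip-wide reset)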
2861 | * | |
2862 | * This routine only applies to 6xxx parts. | |
2863 | * | |
2864 | * LOCKING: | |
2865 | * Inherited from caller. | |
2866 | */ | |
c9d39130 JG |
2867 | static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, |
2868 | unsigned int n_hc) | |
101ffae2 JG |
2869 | { |
2870 | void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; | |
2871 | int i, rc = 0; | |
2872 | u32 t; | |
2873 | ||
2874 | /* Following procedure defined in PCI "main command and status | |
2875 | * register" table. | |
2876 | */ | |
2877 | t = readl(reg); | |
2878 | writel(t | STOP_PCI_MASTER, reg); | |
2879 | ||
2880 | for (i = 0; i < 1000; i++) { | |
2881 | udelay(1); | |
2882 | t = readl(reg); | |
2dcb407e | 2883 | if (PCI_MASTER_EMPTY & t) |
101ffae2 | 2884 | break; |
101ffae2 JG |
2885 | } |
2886 | if (!(PCI_MASTER_EMPTY & t)) { | |
2887 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); | |
2888 | rc = 1; | |
2889 | goto done; | |
2890 | } | |
2891 | ||
2892 | /* set reset */ | |
2893 | i = 5; | |
2894 | do { | |
2895 | writel(t | GLOB_SFT_RST, reg); | |
2896 | t = readl(reg); | |
2897 | udelay(1); | |
2898 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); | |
2899 | ||
2900 | if (!(GLOB_SFT_RST & t)) { | |
2901 | printk(KERN_ERR DRV_NAME ": can't set global reset\n"); | |
2902 | rc = 1; | |
2903 | goto done; | |
2904 | } | |
2905 | ||
2906 | /* clear reset and *reenable the PCI master* (not mentioned in spec) */ | |
2907 | i = 5; | |
2908 | do { | |
2909 | writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); | |
2910 | t = readl(reg); | |
2911 | udelay(1); | |
2912 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); | |
2913 | ||
2914 | if (GLOB_SFT_RST & t) { | |
2915 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); | |
2916 | rc = 1; | |
2917 | } | |
2918 | done: | |
2919 | return rc; | |
2920 | } | |
2921 | ||
47c2b677 | 2922 | static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, |
ba3fe8fb JG |
2923 | void __iomem *mmio) |
2924 | { | |
2925 | void __iomem *port_mmio; | |
2926 | u32 tmp; | |
2927 | ||
8e7decdb | 2928 | tmp = readl(mmio + MV_RESET_CFG_OFS); |
ba3fe8fb | 2929 | if ((tmp & (1 << 0)) == 0) { |
47c2b677 | 2930 | hpriv->signal[idx].amps = 0x7 << 8; |
ba3fe8fb JG |
2931 | hpriv->signal[idx].pre = 0x1 << 5; |
2932 | return; | |
2933 | } | |
2934 | ||
2935 | port_mmio = mv_port_base(mmio, idx); | |
2936 | tmp = readl(port_mmio + PHY_MODE2); | |
2937 | ||
2938 | hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ | |
2939 | hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ | |
2940 | } | |
2941 | ||
47c2b677 | 2942 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
ba3fe8fb | 2943 | { |
8e7decdb | 2944 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); |
ba3fe8fb JG |
2945 | } |
2946 | ||
c9d39130 | 2947 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
2a47ce06 | 2948 | unsigned int port) |
bca1c4eb | 2949 | { |
c9d39130 JG |
2950 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2951 | ||
bca1c4eb | 2952 | u32 hp_flags = hpriv->hp_flags; |
47c2b677 JG |
2953 | int fix_phy_mode2 = |
2954 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); | |
bca1c4eb | 2955 | int fix_phy_mode4 = |
47c2b677 | 2956 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); |
8c30a8b9 | 2957 | u32 m2, m3; |
47c2b677 JG |
2958 | |
2959 | if (fix_phy_mode2) { | |
2960 | m2 = readl(port_mmio + PHY_MODE2); | |
2961 | m2 &= ~(1 << 16); | |
2962 | m2 |= (1 << 31); | |
2963 | writel(m2, port_mmio + PHY_MODE2); | |
2964 | ||
2965 | udelay(200); | |
2966 | ||
2967 | m2 = readl(port_mmio + PHY_MODE2); | |
2968 | m2 &= ~((1 << 16) | (1 << 31)); | |
2969 | writel(m2, port_mmio + PHY_MODE2); | |
2970 | ||
2971 | udelay(200); | |
2972 | } | |
2973 | ||
8c30a8b9 ML |
2974 | /* |
2975 | * Gen-II/IIe PHY_MODE3 errata RM#2: | |
2976 | * Achieves better receiver noise performance than the h/w default: | |
2977 | */ | |
2978 | m3 = readl(port_mmio + PHY_MODE3); | |
2979 | m3 = (m3 & 0x1f) | (0x5555601 << 5); | |
bca1c4eb | 2980 | |
0388a8c0 ML |
2981 | /* Guideline 88F5182 (GL# SATA-S11) */ |
2982 | if (IS_SOC(hpriv)) | |
2983 | m3 &= ~0x1c; | |
2984 | ||
bca1c4eb | 2985 | if (fix_phy_mode4) { |
ba069e37 ML |
2986 | u32 m4 = readl(port_mmio + PHY_MODE4); |
2987 | /* | |
2988 | * Enforce reserved-bit restrictions on GenIIe devices only. | |
2989 | * For earlier chipsets, force only the internal config field | |
2990 | * (workaround for errata FEr SATA#10 part 1). | |
2991 | */ | |
8c30a8b9 | 2992 | if (IS_GEN_IIE(hpriv)) |
ba069e37 ML |
2993 | m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; |
2994 | else | |
2995 | m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; | |
8c30a8b9 | 2996 | writel(m4, port_mmio + PHY_MODE4); |
bca1c4eb | 2997 | } |
b406c7a6 ML |
2998 | /* |
2999 | * Workaround for 60x1-B2 errata SATA#13: | |
3000 | * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, | |
3001 | * so we must always rewrite PHY_MODE3 after PHY_MODE4. | |
3002 | */ | |
3003 | writel(m3, port_mmio + PHY_MODE3); | |
bca1c4eb JG |
3004 | |
3005 | /* Revert values of pre-emphasis and signal amps to the saved ones */ | |
3006 | m2 = readl(port_mmio + PHY_MODE2); | |
3007 | ||
3008 | m2 &= ~MV_M2_PREAMP_MASK; | |
2a47ce06 JG |
3009 | m2 |= hpriv->signal[port].amps; |
3010 | m2 |= hpriv->signal[port].pre; | |
47c2b677 | 3011 | m2 &= ~(1 << 16); |
bca1c4eb | 3012 | |
e4e7b892 JG |
3013 | /* according to mvSata 3.6.1, some IIE values are fixed */ |
3014 | if (IS_GEN_IIE(hpriv)) { | |
3015 | m2 &= ~0xC30FF01F; | |
3016 | m2 |= 0x0000900F; | |
3017 | } | |
3018 | ||
bca1c4eb JG |
3019 | writel(m2, port_mmio + PHY_MODE2); |
3020 | } | |
3021 | ||
f351b2d6 SB |
3022 | /* TODO: use the generic LED interface to configure the SATA Presence */ |
3023 | /* & Activity LEDs on the board */ | 
3024 | static void mv_soc_enable_leds(struct mv_host_priv *hpriv, | |
3025 | void __iomem *mmio) | |
3026 | { | |
3027 | return; | |
3028 | } | |
3029 | ||
3030 | static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, | |
3031 | void __iomem *mmio) | |
3032 | { | |
3033 | void __iomem *port_mmio; | |
3034 | u32 tmp; | |
3035 | ||
3036 | port_mmio = mv_port_base(mmio, idx); | |
3037 | tmp = readl(port_mmio + PHY_MODE2); | |
3038 | ||
3039 | hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ | |
3040 | hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ | |
3041 | } | |
3042 | ||
3043 | #undef ZERO | |
3044 | #define ZERO(reg) writel(0, port_mmio + (reg)) | |
3045 | static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |
3046 | void __iomem *mmio, unsigned int port) | |
3047 | { | |
3048 | void __iomem *port_mmio = mv_port_base(mmio, port); | |
3049 | ||
e12bef50 | 3050 | mv_reset_channel(hpriv, mmio, port); |
f351b2d6 SB |
3051 | |
3052 | ZERO(0x028); /* command */ | |
3053 | writel(0x101f, port_mmio + EDMA_CFG_OFS); | |
3054 | ZERO(0x004); /* timer */ | |
3055 | ZERO(0x008); /* irq err cause */ | |
3056 | ZERO(0x00c); /* irq err mask */ | |
3057 | ZERO(0x010); /* rq bah */ | |
3058 | ZERO(0x014); /* rq inp */ | |
3059 | ZERO(0x018); /* rq outp */ | |
3060 | ZERO(0x01c); /* respq bah */ | |
3061 | ZERO(0x024); /* respq outp */ | |
3062 | ZERO(0x020); /* respq inp */ | |
3063 | ZERO(0x02c); /* test control */ | |
8e7decdb | 3064 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
f351b2d6 SB |
3065 | } |
3066 | ||
3067 | #undef ZERO | |
3068 | ||
3069 | #define ZERO(reg) writel(0, hc_mmio + (reg)) | |
3070 | static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, | |
3071 | void __iomem *mmio) | |
3072 | { | |
3073 | void __iomem *hc_mmio = mv_hc_base(mmio, 0); | |
3074 | ||
3075 | ZERO(0x00c); | |
3076 | ZERO(0x010); | |
3077 | ZERO(0x014); | |
3078 | ||
3079 | } | |
3080 | ||
3081 | #undef ZERO | |
3082 | ||
3083 | static int mv_soc_reset_hc(struct mv_host_priv *hpriv, | |
3084 | void __iomem *mmio, unsigned int n_hc) | |
3085 | { | |
3086 | unsigned int port; | |
3087 | ||
3088 | for (port = 0; port < hpriv->n_ports; port++) | |
3089 | mv_soc_reset_hc_port(hpriv, mmio, port); | |
3090 | ||
3091 | mv_soc_reset_one_hc(hpriv, mmio); | |
3092 | ||
3093 | return 0; | |
3094 | } | |
3095 | ||
3096 | static void mv_soc_reset_flash(struct mv_host_priv *hpriv, | |
3097 | void __iomem *mmio) | |
3098 | { | |
3099 | return; | |
3100 | } | |
3101 | ||
3102 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |
3103 | { | |
3104 | return; | |
3105 | } | |
3106 | ||
8e7decdb | 3107 | static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) |
b67a1064 | 3108 | { |
8e7decdb | 3109 | u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); |
b67a1064 | 3110 | |
8e7decdb | 3111 | ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ |
b67a1064 | 3112 | if (want_gen2i) |
8e7decdb ML |
3113 | ifcfg |= (1 << 7); /* enable gen2i speed */ |
3114 | writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); | |
b67a1064 ML |
3115 | } |
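As a concrete illustration of the bit manipulation in mv_setup_ifcfg(), one worked example follows; the starting register value 0x121 is purely hypothetical.

/*
 * Hypothetical worked example for mv_setup_ifcfg():
 *
 *	readl()		ifcfg = 0x00000121	(assumed starting value)
 *	& 0xf7f		ifcfg = 0x00000121	(clears bit 7 and all bits above 11)
 *	| 0x9b1000	ifcfg = 0x009b1121	(chip-spec mandated bits)
 *	want_gen2i:
 *	| (1 << 7)	ifcfg = 0x009b11a1	(gen2i / 3.0 Gb/s enable)
 *
 * and 0x009b11a1 is the value writelfl() then posts to SATA_INTERFACE_CFG_OFS.
 */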
3116 | ||
e12bef50 | 3117 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
c9d39130 JG |
3118 | unsigned int port_no) |
3119 | { | |
3120 | void __iomem *port_mmio = mv_port_base(mmio, port_no); | |
3121 | ||
8e7decdb ML |
3122 | /* |
3123 | * The datasheet warns against setting EDMA_RESET when EDMA is active | |
3124 | * (but doesn't say what the problem might be). So we first try | |
3125 | * to disable the EDMA engine before doing the EDMA_RESET operation. | |
3126 | */ | |
0d8be5cb | 3127 | mv_stop_edma_engine(port_mmio); |
8e7decdb | 3128 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
c9d39130 | 3129 | |
b67a1064 | 3130 | if (!IS_GEN_I(hpriv)) { |
8e7decdb ML |
3131 | /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ |
3132 | mv_setup_ifcfg(port_mmio, 1); | |
c9d39130 | 3133 | } |
b67a1064 | 3134 | /* |
8e7decdb | 3135 | * Strobing EDMA_RESET here causes a hard reset of the SATA transport, |
b67a1064 ML |
3136 | * link, and physical layers. It resets all SATA interface registers |
3137 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. | |
c9d39130 | 3138 | */ |
8e7decdb | 3139 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
b67a1064 | 3140 | udelay(25); /* allow reset propagation */ |
c9d39130 JG |
3141 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
3142 | ||
3143 | hpriv->ops->phy_errata(hpriv, mmio, port_no); | |
3144 | ||
ee9ccdf7 | 3145 | if (IS_GEN_I(hpriv)) |
c9d39130 JG |
3146 | mdelay(1); |
3147 | } | |
3148 | ||
e49856d8 | 3149 | static void mv_pmp_select(struct ata_port *ap, int pmp) |
20f733e7 | 3150 | { |
e49856d8 ML |
3151 | if (sata_pmp_supported(ap)) { |
3152 | void __iomem *port_mmio = mv_ap_base(ap); | |
3153 | u32 reg = readl(port_mmio + SATA_IFCTL_OFS); | |
3154 | int old = reg & 0xf; | |
22374677 | 3155 | |
e49856d8 ML |
3156 | if (old != pmp) { |
3157 | reg = (reg & ~0xf) | pmp; | |
3158 | writelfl(reg, port_mmio + SATA_IFCTL_OFS); | |
3159 | } | |
22374677 | 3160 | } |
20f733e7 BR |
3161 | } |
3162 | ||
e49856d8 ML |
3163 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, |
3164 | unsigned long deadline) | |
22374677 | 3165 | { |
e49856d8 ML |
3166 | mv_pmp_select(link->ap, sata_srst_pmp(link)); |
3167 | return sata_std_hardreset(link, class, deadline); | |
3168 | } | |
bdd4ddde | 3169 | |
e49856d8 ML |
3170 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
3171 | unsigned long deadline) | |
3172 | { | |
3173 | mv_pmp_select(link->ap, sata_srst_pmp(link)); | |
3174 | return ata_sff_softreset(link, class, deadline); | |
22374677 JG |
3175 | } |
3176 | ||
cc0680a5 | 3177 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
bdd4ddde | 3178 | unsigned long deadline) |
31961943 | 3179 | { |
cc0680a5 | 3180 | struct ata_port *ap = link->ap; |
bdd4ddde | 3181 | struct mv_host_priv *hpriv = ap->host->private_data; |
b562468c | 3182 | struct mv_port_priv *pp = ap->private_data; |
f351b2d6 | 3183 | void __iomem *mmio = hpriv->base; |
0d8be5cb ML |
3184 | int rc, attempts = 0, extra = 0; |
3185 | u32 sstatus; | |
3186 | bool online; | |
31961943 | 3187 | |
e12bef50 | 3188 | mv_reset_channel(hpriv, mmio, ap->port_no); |
b562468c | 3189 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
d16ab3f6 ML |
3190 | pp->pp_flags &= |
3191 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); | |
bdd4ddde | 3192 | |
0d8be5cb ML |
3193 | /* Workaround for errata FEr SATA#10 (part 2) */ |
3194 | do { | |
17c5aab5 ML |
3195 | const unsigned long *timing = |
3196 | sata_ehc_deb_timing(&link->eh_context); | |
bdd4ddde | 3197 | |
17c5aab5 ML |
3198 | rc = sata_link_hardreset(link, timing, deadline + extra, |
3199 | &online, NULL); | |
9dcffd99 | 3200 | rc = online ? -EAGAIN : rc; |
17c5aab5 | 3201 | if (rc) |
0d8be5cb | 3202 | return rc; |
0d8be5cb ML |
3203 | sata_scr_read(link, SCR_STATUS, &sstatus); |
3204 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { | |
3205 | /* Force 1.5gb/s link speed and try again */ | |
8e7decdb | 3206 | mv_setup_ifcfg(mv_ap_base(ap), 0); |
0d8be5cb ML |
3207 | if (time_after(jiffies + HZ, deadline)) |
3208 | extra = HZ; /* only extend it once, max */ | |
3209 | } | |
3210 | } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); | |
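		/*
		 * Note on the SStatus values tested above (standard SATA SCR0
		 * layout: DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8):
		 *	0x000	no device detected
		 *	0x113	device present, phy online, 1.5 Gb/s negotiated
		 *	0x123	device present, phy online, 3.0 Gb/s negotiated
		 *	0x121	device sensed but phy communication not yet
		 *		established; the loop retries on this, and on
		 *		Gen-II+ eventually falls back to 1.5 Gb/s.
		 */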
08da1759 | 3211 | mv_save_cached_regs(ap); |
66e57a2c | 3212 | mv_edma_cfg(ap, 0, 0); |
bdd4ddde | 3213 | |
17c5aab5 | 3214 | return rc; |
bdd4ddde JG |
3215 | } |
3216 | ||
bdd4ddde JG |
3217 | static void mv_eh_freeze(struct ata_port *ap) |
3218 | { | |
1cfd19ae | 3219 | mv_stop_edma(ap); |
c4de573b | 3220 | mv_enable_port_irqs(ap, 0); |
bdd4ddde JG |
3221 | } |
3222 | ||
3223 | static void mv_eh_thaw(struct ata_port *ap) | |
3224 | { | |
f351b2d6 | 3225 | struct mv_host_priv *hpriv = ap->host->private_data; |
c4de573b ML |
3226 | unsigned int port = ap->port_no; |
3227 | unsigned int hardport = mv_hardport_from_port(port); | |
1cfd19ae | 3228 | void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); |
bdd4ddde | 3229 | void __iomem *port_mmio = mv_ap_base(ap); |
c4de573b | 3230 | u32 hc_irq_cause; |
bdd4ddde | 3231 | |
bdd4ddde JG |
3232 | /* clear EDMA errors on this port */ |
3233 | writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
3234 | ||
3235 | /* clear pending irq events */ | |
cae6edc3 | 3236 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); |
1cfd19ae | 3237 | writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); |
bdd4ddde | 3238 | |
88e675e1 | 3239 | mv_enable_port_irqs(ap, ERR_IRQ); |
31961943 BR |
3240 | } |
3241 | ||
05b308e1 BR |
3242 | /** |
3243 | * mv_port_init - Perform some early initialization on a single port. | |
3244 | * @port: libata data structure storing shadow register addresses | |
3245 | * @port_mmio: base address of the port | |
3246 | * | |
3247 | * Initialize shadow register mmio addresses, clear outstanding | |
3248 | * interrupts on the port, and unmask interrupts for the future | |
3249 | * start of the port. | |
3250 | * | |
3251 | * LOCKING: | |
3252 | * Inherited from caller. | |
3253 | */ | |
31961943 | 3254 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) |
20f733e7 | 3255 | { |
0d5ff566 | 3256 | void __iomem *shd_base = port_mmio + SHD_BLK_OFS; |
31961943 BR |
3257 | unsigned serr_ofs; |
3258 | ||
8b260248 | 3259 | /* PIO related setup |
31961943 BR |
3260 | */ |
3261 | port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); | |
8b260248 | 3262 | port->error_addr = |
31961943 BR |
3263 | port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); |
3264 | port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); | |
3265 | port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); | |
3266 | port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); | |
3267 | port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); | |
3268 | port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); | |
8b260248 | 3269 | port->status_addr = |
31961943 BR |
3270 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); |
3271 | /* special case: control/altstatus doesn't have ATA_REG_ address */ | |
3272 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; | |
3273 | ||
3274 | /* unused: */ | |
8d9db2d2 | 3275 | port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; |
20f733e7 | 3276 | |
31961943 BR |
3277 | /* Clear any currently outstanding port interrupt conditions */ |
3278 | serr_ofs = mv_scr_offset(SCR_ERROR); | |
3279 | writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); | |
3280 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
3281 | ||
646a4da5 ML |
3282 | /* unmask all non-transient EDMA error interrupts */ |
3283 | writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); | |
20f733e7 | 3284 | |
8b260248 | 3285 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", |
31961943 BR |
3286 | readl(port_mmio + EDMA_CFG_OFS), |
3287 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), | |
3288 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | |
20f733e7 BR |
3289 | } |
3290 | ||
616d4a98 ML |
3291 | static unsigned int mv_in_pcix_mode(struct ata_host *host) |
3292 | { | |
3293 | struct mv_host_priv *hpriv = host->private_data; | |
3294 | void __iomem *mmio = hpriv->base; | |
3295 | u32 reg; | |
3296 | ||
1f398472 | 3297 | if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) |
616d4a98 ML |
3298 | return 0; /* not PCI-X capable */ |
3299 | reg = readl(mmio + MV_PCI_MODE_OFS); | |
3300 | if ((reg & MV_PCI_MODE_MASK) == 0) | |
3301 | return 0; /* conventional PCI mode */ | |
3302 | return 1; /* chip is in PCI-X mode */ | |
3303 | } | |
3304 | ||
3305 | static int mv_pci_cut_through_okay(struct ata_host *host) | |
3306 | { | |
3307 | struct mv_host_priv *hpriv = host->private_data; | |
3308 | void __iomem *mmio = hpriv->base; | |
3309 | u32 reg; | |
3310 | ||
3311 | if (!mv_in_pcix_mode(host)) { | |
3312 | reg = readl(mmio + PCI_COMMAND_OFS); | |
3313 | if (reg & PCI_COMMAND_MRDTRIG) | |
3314 | return 0; /* not okay */ | |
3315 | } | |
3316 | return 1; /* okay */ | |
3317 | } | |
3318 | ||
4447d351 | 3319 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) |
bca1c4eb | 3320 | { |
4447d351 TH |
3321 | struct pci_dev *pdev = to_pci_dev(host->dev); |
3322 | struct mv_host_priv *hpriv = host->private_data; | |
bca1c4eb JG |
3323 | u32 hp_flags = hpriv->hp_flags; |
3324 | ||
5796d1c4 | 3325 | switch (board_idx) { |
47c2b677 JG |
3326 | case chip_5080: |
3327 | hpriv->ops = &mv5xxx_ops; | |
ee9ccdf7 | 3328 | hp_flags |= MV_HP_GEN_I; |
47c2b677 | 3329 | |
44c10138 | 3330 | switch (pdev->revision) { |
47c2b677 JG |
3331 | case 0x1: |
3332 | hp_flags |= MV_HP_ERRATA_50XXB0; | |
3333 | break; | |
3334 | case 0x3: | |
3335 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3336 | break; | |
3337 | default: | |
3338 | dev_printk(KERN_WARNING, &pdev->dev, | |
3339 | "Applying 50XXB2 workarounds to unknown rev\n"); | |
3340 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3341 | break; | |
3342 | } | |
3343 | break; | |
3344 | ||
bca1c4eb JG |
3345 | case chip_504x: |
3346 | case chip_508x: | |
47c2b677 | 3347 | hpriv->ops = &mv5xxx_ops; |
ee9ccdf7 | 3348 | hp_flags |= MV_HP_GEN_I; |
bca1c4eb | 3349 | |
44c10138 | 3350 | switch (pdev->revision) { |
47c2b677 JG |
3351 | case 0x0: |
3352 | hp_flags |= MV_HP_ERRATA_50XXB0; | |
3353 | break; | |
3354 | case 0x3: | |
3355 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3356 | break; | |
3357 | default: | |
3358 | dev_printk(KERN_WARNING, &pdev->dev, | |
3359 | "Applying B2 workarounds to unknown rev\n"); | |
3360 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3361 | break; | |
bca1c4eb JG |
3362 | } |
3363 | break; | |
3364 | ||
3365 | case chip_604x: | |
3366 | case chip_608x: | |
47c2b677 | 3367 | hpriv->ops = &mv6xxx_ops; |
ee9ccdf7 | 3368 | hp_flags |= MV_HP_GEN_II; |
47c2b677 | 3369 | |
44c10138 | 3370 | switch (pdev->revision) { |
47c2b677 JG |
3371 | case 0x7: |
3372 | hp_flags |= MV_HP_ERRATA_60X1B2; | |
3373 | break; | |
3374 | case 0x9: | |
3375 | hp_flags |= MV_HP_ERRATA_60X1C0; | |
bca1c4eb JG |
3376 | break; |
3377 | default: | |
3378 | dev_printk(KERN_WARNING, &pdev->dev, | |
47c2b677 JG |
3379 | "Applying B2 workarounds to unknown rev\n"); |
3380 | hp_flags |= MV_HP_ERRATA_60X1B2; | |
bca1c4eb JG |
3381 | break; |
3382 | } | |
3383 | break; | |
3384 | ||
e4e7b892 | 3385 | case chip_7042: |
616d4a98 | 3386 | hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; |
306b30f7 ML |
3387 | if (pdev->vendor == PCI_VENDOR_ID_TTI && |
3388 | (pdev->device == 0x2300 || pdev->device == 0x2310)) | |
3389 | { | |
4e520033 ML |
3390 | /* |
3391 | * Highpoint RocketRAID PCIe 23xx series cards: | |
3392 | * | |
3393 | * Unconfigured drives are treated as "Legacy" | |
3394 | * by the BIOS, and it overwrites sector 8 with | |
3395 | * a "Lgcy" metadata block prior to Linux boot. | |
3396 | * | |
3397 | * Configured drives (RAID or JBOD) leave sector 8 | |
3398 | * alone, but instead overwrite a high numbered | |
3399 | * sector for the RAID metadata. This sector can | |
3400 | * be determined exactly, by truncating the physical | |
3401 | * drive capacity to a nice even GB value. | |
3402 | * | |
3403 | * RAID metadata is at: (dev->n_sectors & ~0xfffff) | |
3404 | * | |
3405 | * Warn the user, lest they think we're just buggy. | |
3406 | */ | |
3407 | printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" | |
3408 | " BIOS CORRUPTS DATA on all attached drives," | |
3409 | " regardless of if/how they are configured." | |
3410 | " BEWARE!\n"); | |
3411 | printk(KERN_WARNING DRV_NAME ": For data safety, do not" | |
3412 | " use sectors 8-9 on \"Legacy\" drives," | |
3413 | " and avoid the final two gigabytes on" | |
3414 | " all RocketRAID BIOS initialized drives.\n"); | |
306b30f7 | 3415 | } |
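		/*
		 * Worked example of the metadata location formula above, for a
		 * hypothetical 1 TB drive (1953525168 sectors):
		 *
		 *	1953525168 & ~0xfffff == 1953497088
		 *
		 * i.e. the RAID metadata begins at sector 1953497088, the
		 * capacity rounded down to a 0x100000-sector (512 MiB) boundary.
		 */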
8e7decdb | 3416 | /* drop through */ |
e4e7b892 JG |
3417 | case chip_6042: |
3418 | hpriv->ops = &mv6xxx_ops; | |
e4e7b892 | 3419 | hp_flags |= MV_HP_GEN_IIE; |
616d4a98 ML |
3420 | if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) |
3421 | hp_flags |= MV_HP_CUT_THROUGH; | |
e4e7b892 | 3422 | |
44c10138 | 3423 | switch (pdev->revision) { |
5cf73bfb | 3424 | case 0x2: /* Rev.B0: the first/only public release */ |
e4e7b892 JG |
3425 | hp_flags |= MV_HP_ERRATA_60X1C0; |
3426 | break; | |
3427 | default: | |
3428 | dev_printk(KERN_WARNING, &pdev->dev, | |
3429 | "Applying 60X1C0 workarounds to unknown rev\n"); | |
3430 | hp_flags |= MV_HP_ERRATA_60X1C0; | |
3431 | break; | |
3432 | } | |
3433 | break; | |
f351b2d6 SB |
3434 | case chip_soc: |
3435 | hpriv->ops = &mv_soc_ops; | |
eb3a55a9 SB |
3436 | hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | |
3437 | MV_HP_ERRATA_60X1C0; | |
f351b2d6 | 3438 | break; |
e4e7b892 | 3439 | |
bca1c4eb | 3440 | default: |
f351b2d6 | 3441 | dev_printk(KERN_ERR, host->dev, |
5796d1c4 | 3442 | "BUG: invalid board index %u\n", board_idx); |
bca1c4eb JG |
3443 | return 1; |
3444 | } | |
3445 | ||
3446 | hpriv->hp_flags = hp_flags; | |
02a121da ML |
3447 | if (hp_flags & MV_HP_PCIE) { |
3448 | hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; | |
3449 | hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; | |
3450 | hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; | |
3451 | } else { | |
3452 | hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; | |
3453 | hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; | |
3454 | hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; | |
3455 | } | |
bca1c4eb JG |
3456 | |
3457 | return 0; | |
3458 | } | |
3459 | ||
05b308e1 | 3460 | /** |
47c2b677 | 3461 | * mv_init_host - Perform some early initialization of the host. |
4447d351 TH |
3462 | * @host: ATA host to initialize |
3463 | * @board_idx: controller index | |
05b308e1 BR |
3464 | * |
3465 | * If possible, do an early global reset of the host. Then do | |
3466 | * our port init and clear/unmask all/relevant host interrupts. | |
3467 | * | |
3468 | * LOCKING: | |
3469 | * Inherited from caller. | |
3470 | */ | |
4447d351 | 3471 | static int mv_init_host(struct ata_host *host, unsigned int board_idx) |
20f733e7 BR |
3472 | { |
3473 | int rc = 0, n_hc, port, hc; | |
4447d351 | 3474 | struct mv_host_priv *hpriv = host->private_data; |
f351b2d6 | 3475 | void __iomem *mmio = hpriv->base; |
47c2b677 | 3476 | |
4447d351 | 3477 | rc = mv_chip_id(host, board_idx); |
bca1c4eb | 3478 | if (rc) |
352fab70 | 3479 | goto done; |
f351b2d6 | 3480 | |
1f398472 | 3481 | if (IS_SOC(hpriv)) { |
7368f919 ML |
3482 | hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; |
3483 | hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; | |
1f398472 ML |
3484 | } else { |
3485 | hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; | |
3486 | hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; | |
f351b2d6 | 3487 | } |
352fab70 | 3488 | |
5d0fb2e7 TR |
3489 | /* initialize shadow irq mask with register's value */ |
3490 | hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); | |
3491 | ||
352fab70 | 3492 | /* global interrupt mask: 0 == mask everything */ |
c4de573b | 3493 | mv_set_main_irq_mask(host, ~0, 0); |
bca1c4eb | 3494 | |
4447d351 | 3495 | n_hc = mv_get_hc_count(host->ports[0]->flags); |
bca1c4eb | 3496 | |
4447d351 | 3497 | for (port = 0; port < host->n_ports; port++) |
47c2b677 | 3498 | hpriv->ops->read_preamp(hpriv, port, mmio); |
20f733e7 | 3499 | |
c9d39130 | 3500 | rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); |
47c2b677 | 3501 | if (rc) |
20f733e7 | 3502 | goto done; |
20f733e7 | 3503 | |
522479fb | 3504 | hpriv->ops->reset_flash(hpriv, mmio); |
7bb3c529 | 3505 | hpriv->ops->reset_bus(host, mmio); |
47c2b677 | 3506 | hpriv->ops->enable_leds(hpriv, mmio); |
20f733e7 | 3507 | |
4447d351 | 3508 | for (port = 0; port < host->n_ports; port++) { |
cbcdd875 | 3509 | struct ata_port *ap = host->ports[port]; |
2a47ce06 | 3510 | void __iomem *port_mmio = mv_port_base(mmio, port); |
cbcdd875 TH |
3511 | |
3512 | mv_port_init(&ap->ioaddr, port_mmio); | |
3513 | ||
7bb3c529 | 3514 | #ifdef CONFIG_PCI |
1f398472 | 3515 | if (!IS_SOC(hpriv)) { |
f351b2d6 SB |
3516 | unsigned int offset = port_mmio - mmio; |
3517 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); | |
3518 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); | |
3519 | } | |
7bb3c529 | 3520 | #endif |
20f733e7 BR |
3521 | } |
3522 | ||
3523 | for (hc = 0; hc < n_hc; hc++) { | |
31961943 BR |
3524 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
3525 | ||
3526 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " | |
3527 | "(before clear)=0x%08x\n", hc, | |
3528 | readl(hc_mmio + HC_CFG_OFS), | |
3529 | readl(hc_mmio + HC_IRQ_CAUSE_OFS)); | |
3530 | ||
3531 | /* Clear any currently outstanding hc interrupt conditions */ | |
3532 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | |
20f733e7 BR |
3533 | } |
3534 | ||
6be96ac1 ML |
3535 | /* Clear any currently outstanding host interrupt conditions */ |
3536 | writelfl(0, mmio + hpriv->irq_cause_ofs); | |
31961943 | 3537 | |
6be96ac1 ML |
3538 | /* and unmask interrupt generation for host regs */ |
3539 | writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); | |
51de32d2 | 3540 | |
6be96ac1 ML |
3541 | /* |
3542 | * enable only global host interrupts for now. | |
3543 | * The per-port interrupts get done later as ports are set up. | |
3544 | */ | |
3545 | mv_set_main_irq_mask(host, 0, PCI_ERR); | |
f351b2d6 SB |
3546 | done: |
3547 | return rc; | |
3548 | } | |
fb621e2f | 3549 | |
fbf14e2f BB |
3550 | static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) |
3551 | { | |
3552 | hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, | |
3553 | MV_CRQB_Q_SZ, 0); | |
3554 | if (!hpriv->crqb_pool) | |
3555 | return -ENOMEM; | |
3556 | ||
3557 | hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, | |
3558 | MV_CRPB_Q_SZ, 0); | |
3559 | if (!hpriv->crpb_pool) | |
3560 | return -ENOMEM; | |
3561 | ||
3562 | hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, | |
3563 | MV_SG_TBL_SZ, 0); | |
3564 | if (!hpriv->sg_tbl_pool) | |
3565 | return -ENOMEM; | |
3566 | ||
3567 | return 0; | |
3568 | } | |
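For context, the per-port setup path elsewhere in this driver carves its command/response queues out of the pools created above. A minimal sketch of that usage follows; the helper and variable names (example_alloc_crqb, crqb, crqb_dma) are illustrative, not the driver's actual functions.

static int example_alloc_crqb(struct mv_host_priv *hpriv,
			      void **crqb, dma_addr_t *crqb_dma)
{
	/* dma_pool_alloc() returns a CPU pointer and fills in the DMA address */
	*crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, crqb_dma);
	if (!*crqb)
		return -ENOMEM;
	memset(*crqb, 0, MV_CRQB_Q_SZ);
	/* released later with dma_pool_free(hpriv->crqb_pool, *crqb, *crqb_dma) */
	return 0;
}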
3569 | ||
15a32632 LB |
3570 | static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, |
3571 | struct mbus_dram_target_info *dram) | |
3572 | { | |
3573 | int i; | |
3574 | ||
3575 | for (i = 0; i < 4; i++) { | |
3576 | writel(0, hpriv->base + WINDOW_CTRL(i)); | |
3577 | writel(0, hpriv->base + WINDOW_BASE(i)); | |
3578 | } | |
3579 | ||
3580 | for (i = 0; i < dram->num_cs; i++) { | |
3581 | struct mbus_dram_window *cs = dram->cs + i; | |
3582 | ||
3583 | writel(((cs->size - 1) & 0xffff0000) | | |
3584 | (cs->mbus_attr << 8) | | |
3585 | (dram->mbus_dram_target_id << 4) | 1, | |
3586 | hpriv->base + WINDOW_CTRL(i)); | |
3587 | writel(cs->base, hpriv->base + WINDOW_BASE(i)); | |
3588 | } | |
3589 | } | |
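To make the WINDOW_CTRL encoding concrete, here is one worked example; the 256 MB window size, attribute 0x0e, and target id 0 are made-up values.

/*
 *	cs->size - 1			= 0x0fffffff
 *	& 0xffff0000			= 0x0fff0000
 *	| (cs->mbus_attr << 8)		= 0x0fff0e00	(attr 0x0e assumed)
 *	| (target_id << 4) | 1		= 0x0fff0e01	(target 0; bit 0 enables the window)
 */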
3590 | ||
f351b2d6 SB |
3591 | /** |
3592 | * mv_platform_probe - handle a positive probe of an SoC Marvell | 
3593 | * host | |
3594 | * @pdev: platform device found | |
3595 | * | |
3596 | * LOCKING: | |
3597 | * Inherited from caller. | |
3598 | */ | |
3599 | static int mv_platform_probe(struct platform_device *pdev) | |
3600 | { | |
3601 | static int printed_version; | |
3602 | const struct mv_sata_platform_data *mv_platform_data; | |
3603 | const struct ata_port_info *ppi[] = | |
3604 | { &mv_port_info[chip_soc], NULL }; | |
3605 | struct ata_host *host; | |
3606 | struct mv_host_priv *hpriv; | |
3607 | struct resource *res; | |
3608 | int n_ports, rc; | |
20f733e7 | 3609 | |
f351b2d6 SB |
3610 | if (!printed_version++) |
3611 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | |
bca1c4eb | 3612 | |
f351b2d6 SB |
3613 | /* |
3614 | * Simple resource validation .. | |
3615 | */ | |
3616 | if (unlikely(pdev->num_resources != 2)) { | |
3617 | dev_err(&pdev->dev, "invalid number of resources\n"); | |
3618 | return -EINVAL; | |
3619 | } | |
3620 | ||
3621 | /* | |
3622 | * Get the register base first | |
3623 | */ | |
3624 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
3625 | if (res == NULL) | |
3626 | return -EINVAL; | |
3627 | ||
3628 | /* allocate host */ | |
3629 | mv_platform_data = pdev->dev.platform_data; | |
3630 | n_ports = mv_platform_data->n_ports; | |
3631 | ||
3632 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); | |
3633 | hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); | |
3634 | ||
3635 | if (!host || !hpriv) | |
3636 | return -ENOMEM; | |
3637 | host->private_data = hpriv; | |
3638 | hpriv->n_ports = n_ports; | |
3639 | ||
3640 | host->iomap = NULL; | |
f1cb0ea1 SB |
3641 | hpriv->base = devm_ioremap(&pdev->dev, res->start, |
3642 | res->end - res->start + 1); | |
f351b2d6 SB |
3643 | hpriv->base -= MV_SATAHC0_REG_BASE; |
3644 | ||
15a32632 LB |
3645 | /* |
3646 | * (Re-)program MBUS remapping windows if we are asked to. | |
3647 | */ | |
3648 | if (mv_platform_data->dram != NULL) | |
3649 | mv_conf_mbus_windows(hpriv, mv_platform_data->dram); | |
3650 | ||
fbf14e2f BB |
3651 | rc = mv_create_dma_pools(hpriv, &pdev->dev); |
3652 | if (rc) | |
3653 | return rc; | |
3654 | ||
f351b2d6 SB |
3655 | /* initialize adapter */ |
3656 | rc = mv_init_host(host, chip_soc); | |
3657 | if (rc) | |
3658 | return rc; | |
3659 | ||
3660 | dev_printk(KERN_INFO, &pdev->dev, | |
3661 | "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, | |
3662 | host->n_ports); | |
3663 | ||
3664 | return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, | |
3665 | IRQF_SHARED, &mv6_sht); | |
3666 | } | |
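For reference, a board file hands this probe its configuration through platform data. A minimal sketch follows, with made-up names, using only the two fields the probe actually reads (n_ports and dram) and noting the two resources it validates.

static struct mv_sata_platform_data example_sata_data = {
	.n_ports	= 2,
	/* .dram may point at the SoC's mbus_dram_target_info to reprogram windows */
};

static struct platform_device example_sata_device = {
	.name	= "sata_mv",		/* must match DRV_NAME for the platform bus to bind */
	.id	= 0,
	.dev	= {
		.platform_data	= &example_sata_data,
	},
	/* .resource/.num_resources: exactly one MEM region and one IRQ */
};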
3667 | ||
3668 | /* | |
3669 | * | |
3670 | * mv_platform_remove - unplug a platform interface | |
3671 | * @pdev: platform device | |
3672 | * | |
3673 | * A platform bus SATA device has been unplugged. Perform the needed | |
3674 | * cleanup. Also called on module unload for any active devices. | |
3675 | */ | |
3676 | static int __devexit mv_platform_remove(struct platform_device *pdev) | |
3677 | { | |
3678 | struct device *dev = &pdev->dev; | |
3679 | struct ata_host *host = dev_get_drvdata(dev); | |
f351b2d6 SB |
3680 | |
3681 | ata_host_detach(host); | |
f351b2d6 | 3682 | return 0; |
20f733e7 BR |
3683 | } |
3684 | ||
f351b2d6 SB |
3685 | static struct platform_driver mv_platform_driver = { |
3686 | .probe = mv_platform_probe, | |
3687 | .remove = __devexit_p(mv_platform_remove), | |
3688 | .driver = { | |
3689 | .name = DRV_NAME, | |
3690 | .owner = THIS_MODULE, | |
3691 | }, | |
3692 | }; | |
3693 | ||
3694 | ||
7bb3c529 | 3695 | #ifdef CONFIG_PCI |
f351b2d6 SB |
3696 | static int mv_pci_init_one(struct pci_dev *pdev, |
3697 | const struct pci_device_id *ent); | |
3698 | ||
7bb3c529 SB |
3699 | |
3700 | static struct pci_driver mv_pci_driver = { | |
3701 | .name = DRV_NAME, | |
3702 | .id_table = mv_pci_tbl, | |
f351b2d6 | 3703 | .probe = mv_pci_init_one, |
7bb3c529 SB |
3704 | .remove = ata_pci_remove_one, |
3705 | }; | |
3706 | ||
3707 | /* | |
3708 | * module options | |
3709 | */ | |
3710 | static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ | |
3711 | ||
3712 | ||
3713 | /* move to PCI layer or libata core? */ | |
3714 | static int pci_go_64(struct pci_dev *pdev) | |
3715 | { | |
3716 | int rc; | |
3717 | ||
3718 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | |
3719 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
3720 | if (rc) { | |
3721 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
3722 | if (rc) { | |
3723 | dev_printk(KERN_ERR, &pdev->dev, | |
3724 | "64-bit DMA enable failed\n"); | |
3725 | return rc; | |
3726 | } | |
3727 | } | |
3728 | } else { | |
3729 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
3730 | if (rc) { | |
3731 | dev_printk(KERN_ERR, &pdev->dev, | |
3732 | "32-bit DMA enable failed\n"); | |
3733 | return rc; | |
3734 | } | |
3735 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
3736 | if (rc) { | |
3737 | dev_printk(KERN_ERR, &pdev->dev, | |
3738 | "32-bit consistent DMA enable failed\n"); | |
3739 | return rc; | |
3740 | } | |
3741 | } | |
3742 | ||
3743 | return rc; | |
3744 | } | |
3745 | ||
05b308e1 BR |
3746 | /** |
3747 | * mv_print_info - Dump key info to kernel log for perusal. | |
4447d351 | 3748 | * @host: ATA host to print info about |
05b308e1 BR |
3749 | * |
3750 | * FIXME: complete this. | |
3751 | * | |
3752 | * LOCKING: | |
3753 | * Inherited from caller. | |
3754 | */ | |
4447d351 | 3755 | static void mv_print_info(struct ata_host *host) |
31961943 | 3756 | { |
4447d351 TH |
3757 | struct pci_dev *pdev = to_pci_dev(host->dev); |
3758 | struct mv_host_priv *hpriv = host->private_data; | |
44c10138 | 3759 | u8 scc; |
c1e4fe71 | 3760 | const char *scc_s, *gen; |
31961943 BR |
3761 | |
3762 | /* Use this to determine the HW stepping of the chip so we know | |
3763 | * what errata to workaround | |
3764 | */ | |
31961943 BR |
3765 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); |
3766 | if (scc == 0) | |
3767 | scc_s = "SCSI"; | |
3768 | else if (scc == 0x01) | |
3769 | scc_s = "RAID"; | |
3770 | else | |
c1e4fe71 JG |
3771 | scc_s = "?"; |
3772 | ||
3773 | if (IS_GEN_I(hpriv)) | |
3774 | gen = "I"; | |
3775 | else if (IS_GEN_II(hpriv)) | |
3776 | gen = "II"; | |
3777 | else if (IS_GEN_IIE(hpriv)) | |
3778 | gen = "IIE"; | |
3779 | else | |
3780 | gen = "?"; | |
31961943 | 3781 | |
a9524a76 | 3782 | dev_printk(KERN_INFO, &pdev->dev, |
c1e4fe71 JG |
3783 | "Gen-%s %u slots %u ports %s mode IRQ via %s\n", |
3784 | gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, | |
31961943 BR |
3785 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); |
3786 | } | |
3787 | ||
05b308e1 | 3788 | /** |
f351b2d6 | 3789 | * mv_pci_init_one - handle a positive probe of a PCI Marvell host |
05b308e1 BR |
3790 | * @pdev: PCI device found |
3791 | * @ent: PCI device ID entry for the matched host | |
3792 | * | |
3793 | * LOCKING: | |
3794 | * Inherited from caller. | |
3795 | */ | |
f351b2d6 SB |
3796 | static int mv_pci_init_one(struct pci_dev *pdev, |
3797 | const struct pci_device_id *ent) | |
20f733e7 | 3798 | { |
2dcb407e | 3799 | static int printed_version; |
20f733e7 | 3800 | unsigned int board_idx = (unsigned int)ent->driver_data; |
4447d351 TH |
3801 | const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; |
3802 | struct ata_host *host; | |
3803 | struct mv_host_priv *hpriv; | |
3804 | int n_ports, rc; | |
20f733e7 | 3805 | |
a9524a76 JG |
3806 | if (!printed_version++) |
3807 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | |
20f733e7 | 3808 | |
4447d351 TH |
3809 | /* allocate host */ |
3810 | n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; | |
3811 | ||
3812 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); | |
3813 | hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); | |
3814 | if (!host || !hpriv) | |
3815 | return -ENOMEM; | |
3816 | host->private_data = hpriv; | |
f351b2d6 | 3817 | hpriv->n_ports = n_ports; |
4447d351 TH |
3818 | |
3819 | /* acquire resources */ | |
24dc5f33 TH |
3820 | rc = pcim_enable_device(pdev); |
3821 | if (rc) | |
20f733e7 | 3822 | return rc; |
20f733e7 | 3823 | |
0d5ff566 TH |
3824 | rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); |
3825 | if (rc == -EBUSY) | |
24dc5f33 | 3826 | pcim_pin_device(pdev); |
0d5ff566 | 3827 | if (rc) |
24dc5f33 | 3828 | return rc; |
4447d351 | 3829 | host->iomap = pcim_iomap_table(pdev); |
f351b2d6 | 3830 | hpriv->base = host->iomap[MV_PRIMARY_BAR]; |
20f733e7 | 3831 | |
d88184fb JG |
3832 | rc = pci_go_64(pdev); |
3833 | if (rc) | |
3834 | return rc; | |
3835 | ||
da2fa9ba ML |
3836 | rc = mv_create_dma_pools(hpriv, &pdev->dev); |
3837 | if (rc) | |
3838 | return rc; | |
3839 | ||
20f733e7 | 3840 | /* initialize adapter */ |
4447d351 | 3841 | rc = mv_init_host(host, board_idx); |
24dc5f33 TH |
3842 | if (rc) |
3843 | return rc; | |
20f733e7 | 3844 | |
6d3c30ef ML |
3845 | /* Enable message-signaled interrupts (MSI), if requested */ | 
3846 | if (msi && pci_enable_msi(pdev) == 0) | |
3847 | hpriv->hp_flags |= MV_HP_FLAG_MSI; | |
20f733e7 | 3848 | |
31961943 | 3849 | mv_dump_pci_cfg(pdev, 0x68); |
4447d351 | 3850 | mv_print_info(host); |
20f733e7 | 3851 | |
4447d351 | 3852 | pci_set_master(pdev); |
ea8b4db9 | 3853 | pci_try_set_mwi(pdev); |
4447d351 | 3854 | return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, |
c5d3e45a | 3855 | IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); |
20f733e7 | 3856 | } |
7bb3c529 | 3857 | #endif |
20f733e7 | 3858 | |
f351b2d6 SB |
3859 | static int mv_platform_probe(struct platform_device *pdev); |
3860 | static int __devexit mv_platform_remove(struct platform_device *pdev); | |
3861 | ||
20f733e7 BR |
3862 | static int __init mv_init(void) |
3863 | { | |
7bb3c529 SB |
3864 | int rc = -ENODEV; |
3865 | #ifdef CONFIG_PCI | |
3866 | rc = pci_register_driver(&mv_pci_driver); | |
f351b2d6 SB |
3867 | if (rc < 0) |
3868 | return rc; | |
3869 | #endif | |
3870 | rc = platform_driver_register(&mv_platform_driver); | |
3871 | ||
3872 | #ifdef CONFIG_PCI | |
3873 | if (rc < 0) | |
3874 | pci_unregister_driver(&mv_pci_driver); | |
7bb3c529 SB |
3875 | #endif |
3876 | return rc; | |
20f733e7 BR |
3877 | } |
3878 | ||
3879 | static void __exit mv_exit(void) | |
3880 | { | |
7bb3c529 | 3881 | #ifdef CONFIG_PCI |
20f733e7 | 3882 | pci_unregister_driver(&mv_pci_driver); |
7bb3c529 | 3883 | #endif |
f351b2d6 | 3884 | platform_driver_unregister(&mv_platform_driver); |
20f733e7 BR |
3885 | } |
3886 | ||
3887 | MODULE_AUTHOR("Brett Russ"); | |
3888 | MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); | |
3889 | MODULE_LICENSE("GPL"); | |
3890 | MODULE_DEVICE_TABLE(pci, mv_pci_tbl); | |
3891 | MODULE_VERSION(DRV_VERSION); | |
17c5aab5 | 3892 | MODULE_ALIAS("platform:" DRV_NAME); |
20f733e7 | 3893 | |
7bb3c529 | 3894 | #ifdef CONFIG_PCI |
ddef9bb3 JG |
3895 | module_param(msi, int, 0444); |
3896 | MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); | |
7bb3c529 | 3897 | #endif |
ddef9bb3 | 3898 | |
20f733e7 BR |
3899 | module_init(mv_init); |
3900 | module_exit(mv_exit); |