/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *     Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *     the overhead reduced by interrupt mitigation is not worth the
 *     added latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.24"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,

	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	MV_GENIIE_FLAGS		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
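
/*
 * Usage note (illustrative, not from the original sources): writelfl()
 * is used where a register write must reach the chip before the code
 * proceeds, e.g. mask updates and queue-pointer kicks; a plain writel()
 * suffices where a posted write is acceptable.
 */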

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
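
/*
 * Worked example (for illustration): port 5 is hardport 1 on HC1,
 * so shift = 1 * HC_SHIFT (9) + 1 * 2 = 11; its ERR_IRQ and DONE_IRQ
 * bits land at positions 11 and 12 of the main irq cause/mask registers.
 */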

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
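
/*
 * Worked example (for illustration): for port 5, mv_port_base() yields
 * base + 0x20000 (SATAHC0) + 1 * 0x10000 (HC1) + 0x2000 (arbiter)
 * + 1 * 0x2000 (hardport 1) = base + 0x34000.
 */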

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
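
/*
 * Note (for illustration): EDMA_REQ_Q_PTR_SHIFT is 5 because each CRQB
 * is 32 bytes, so "req_idx << 5" is the byte offset of that slot within
 * the 1KB request queue; likewise EDMA_RSP_Q_PTR_SHIFT is 3 for the
 * 8-byte CRPBs. Base address and queue index thus share one register.
 */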

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		if (IS_GEN_IIE(hpriv))
			writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
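
/*
 * For illustration: the polling loop above allows up to 10000 * 10 usec,
 * i.e. roughly 100 msec, for EDMA_EN to clear before reporting -EIO.
 */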

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
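
/*
 * For illustration: SCR_STATUS, SCR_ERROR and SCR_CONTROL are consecutive
 * 32-bit registers starting at 0x300, so e.g. SCR_ERROR (index 1) maps
 * to 0x304; only SCR_ACTIVE lives apart, at 0x350.
 */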

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
{
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |=  FISCFG_WAIT_DEV_ERR;
	}

	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		if (!IS_SOC(hpriv))
			cfg |= (1 << 18); /* enab early completion */
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}

/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
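
/*
 * For illustration: a 0x3000-byte segment at bus address 0x1f000 crosses
 * a 64K boundary, so the loop above emits two ePRDs: 0x1000 bytes at
 * 0x1f000 and 0x2000 bytes at 0x20000. This worst-case doubling is why
 * .sg_tablesize is only MV_MAX_SG_CT / 2 in the host templates.
 */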

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
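
/*
 * For illustration: each packed halfword holds the register value in
 * bits 7:0 and the shadow register address in the bits at
 * CRQB_CMD_ADDR_SHIFT, OR'd with the CRQB_CMD_CS control select;
 * CRQB_CMD_LAST (bit 15) marks the final register write of the CRQB.
 */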

/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1563
05b308e1
BR
1564/**
1565 * mv_qc_issue - Initiate a command to the host
1566 * @qc: queued command to start
1567 *
1568 * This routine simply redirects to the general purpose routine
1569 * if command is not DMA. Else, it sanity checks our local
1570 * caches of the request producer/consumer indices then enables
1571 * DMA and bumps the request producer index.
1572 *
1573 * LOCKING:
1574 * Inherited from caller.
1575 */
9a3d9eb0 1576static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
31961943 1577{
c5d3e45a
JG
1578 struct ata_port *ap = qc->ap;
1579 void __iomem *port_mmio = mv_ap_base(ap);
1580 struct mv_port_priv *pp = ap->private_data;
bdd4ddde 1581 u32 in_index;
31961943 1582
138bfdd0
ML
1583 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1584 (qc->tf.protocol != ATA_PROT_NCQ)) {
c6112bd8
ML
1585 static int limit_warnings = 10;
1586 /*
1587 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
1588 *
1589 * Someday, we might implement special polling workarounds
1590 * for these, but it all seems rather unnecessary since we
1591 * normally use only DMA for commands which transfer more
1592 * than a single block of data.
1593 *
1594 * Much of the time, this could just work regardless.
1595 * So for now, just log the incident, and allow the attempt.
1596 */
c7843e8f 1597 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
c6112bd8
ML
1598 --limit_warnings;
1599 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
1600 ": attempting PIO w/multiple DRQ: "
1601 "this may fail due to h/w errata\n");
1602 }
17c5aab5
ML
1603 /*
1604 * We're about to send a non-EDMA capable command to the
31961943
BR
1605 * port. Turn off EDMA so there won't be problems accessing
1606 * shadow block, etc registers.
1607 */
b562468c 1608 mv_stop_edma(ap);
88e675e1 1609 mv_enable_port_irqs(ap, ERR_IRQ);
e49856d8 1610 mv_pmp_select(ap, qc->dev->link->pmp);
9363c382 1611 return ata_sff_qc_issue(qc);
31961943
BR
1612 }
1613
72109168 1614 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
bdd4ddde 1615
fcfb1f77
ML
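	/*
	 * Advance our software copy of the request producer index.
	 * MV_MAX_Q_DEPTH is a power of two (e.g. with a depth of 32 the
	 * mask is 31, so (31 + 1) & 31 == 0), letting the AND below
	 * handle wraparound for free.
	 */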
1616 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1617 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
31961943
BR
1618
1619 /* and write the request in pointer to kick the EDMA to life */
bdd4ddde
JG
1620 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1621 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
31961943
BR
1622
1623 return 0;
1624}
1625
8f767f8a
ML
1626static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
1627{
1628 struct mv_port_priv *pp = ap->private_data;
1629 struct ata_queued_cmd *qc;
1630
1631 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1632 return NULL;
1633 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1634 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1635 qc = NULL;
1636 return qc;
1637}
1638
29d187bb
ML
1639static void mv_pmp_error_handler(struct ata_port *ap)
1640{
1641 unsigned int pmp, pmp_map;
1642 struct mv_port_priv *pp = ap->private_data;
1643
1644 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
1645 /*
1646 * Perform NCQ error analysis on failed PMPs
1647 * before we freeze the port entirely.
1648 *
1649 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
1650 */
1651 pmp_map = pp->delayed_eh_pmp_map;
1652 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
1653 for (pmp = 0; pmp_map != 0; pmp++) {
1654 unsigned int this_pmp = (1 << pmp);
1655 if (pmp_map & this_pmp) {
1656 struct ata_link *link = &ap->pmp_link[pmp];
1657 pmp_map &= ~this_pmp;
1658 ata_eh_analyze_ncq_error(link);
1659 }
1660 }
1661 ata_port_freeze(ap);
1662 }
1663 sata_pmp_error_handler(ap);
1664}
1665
4c299ca3
ML
1666static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
1667{
1668 void __iomem *port_mmio = mv_ap_base(ap);
1669
1670 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
1671}
1672
4c299ca3
ML
1673static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
1674{
1675 struct ata_eh_info *ehi;
1676 unsigned int pmp;
1677
1678 /*
1679 * Initialize EH info for PMPs which saw device errors
1680 */
1681 ehi = &ap->link.eh_info;
1682 for (pmp = 0; pmp_map != 0; pmp++) {
1683 unsigned int this_pmp = (1 << pmp);
1684 if (pmp_map & this_pmp) {
1685 struct ata_link *link = &ap->pmp_link[pmp];
1686
1687 pmp_map &= ~this_pmp;
1688 ehi = &link->eh_info;
1689 ata_ehi_clear_desc(ehi);
1690 ata_ehi_push_desc(ehi, "dev err");
1691 ehi->err_mask |= AC_ERR_DEV;
1692 ehi->action |= ATA_EH_RESET;
1693 ata_link_abort(link);
1694 }
1695 }
1696}
1697
06aaca3f
ML
1698static int mv_req_q_empty(struct ata_port *ap)
1699{
1700 void __iomem *port_mmio = mv_ap_base(ap);
1701 u32 in_ptr, out_ptr;
1702
1703 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
1704 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1705 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1706 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1707 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
1708}
1709
4c299ca3
ML
1710static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
1711{
1712 struct mv_port_priv *pp = ap->private_data;
1713 int failed_links;
1714 unsigned int old_map, new_map;
1715
1716 /*
1717 * Device error during FBS+NCQ operation:
1718 *
1719 * Set a port flag to prevent further I/O being enqueued.
1720 * Leave the EDMA running to drain outstanding commands from this port.
1721 * Perform the post-mortem/EH only when all responses are complete.
1722 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
1723 */
1724 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
1725 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
1726 pp->delayed_eh_pmp_map = 0;
1727 }
1728 old_map = pp->delayed_eh_pmp_map;
1729 new_map = old_map | mv_get_err_pmp_map(ap);
1730
1731 if (old_map != new_map) {
1732 pp->delayed_eh_pmp_map = new_map;
1733 mv_pmp_eh_prep(ap, new_map & ~old_map);
1734 }
c46938cc 1735 failed_links = hweight16(new_map);
4c299ca3
ML
1736
1737 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
1738 "failed_links=%d nr_active_links=%d\n",
1739 __func__, pp->delayed_eh_pmp_map,
1740 ap->qc_active, failed_links,
1741 ap->nr_active_links);
1742
06aaca3f 1743 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
4c299ca3
ML
1744 mv_process_crpb_entries(ap, pp);
1745 mv_stop_edma(ap);
1746 mv_eh_freeze(ap);
1747 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
1748 return 1; /* handled */
1749 }
1750 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
1751 return 1; /* handled */
1752}
1753
1754static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
1755{
1756 /*
1757 * Possible future enhancement:
1758 *
1759 * FBS+non-NCQ operation is not yet implemented.
1760 * See related notes in mv_edma_cfg().
1761 *
1762 * Device error during FBS+non-NCQ operation:
1763 *
1764 * We need to snapshot the shadow registers for each failed command.
1765 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
1766 */
1767 return 0; /* not handled */
1768}
1769
1770static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
1771{
1772 struct mv_port_priv *pp = ap->private_data;
1773
1774 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1775 return 0; /* EDMA was not active: not handled */
1776 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
1777 return 0; /* FBS was not active: not handled */
1778
1779 if (!(edma_err_cause & EDMA_ERR_DEV))
1780 return 0; /* non DEV error: not handled */
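	/*
	 * Mask off the transient error bits first: the code below only
	 * handles a clean, isolated device error, so anything else left
	 * in edma_err_cause must defer to the normal error paths.
	 */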
1781 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
1782 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
1783 return 0; /* other problems: not handled */
1784
1785 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
1786 /*
1787 * EDMA should NOT have self-disabled for this case.
1788 * If it did, then something is wrong elsewhere,
1789 * and we cannot handle it here.
1790 */
1791 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1792 ata_port_printk(ap, KERN_WARNING,
1793 "%s: err_cause=0x%x pp_flags=0x%x\n",
1794 __func__, edma_err_cause, pp->pp_flags);
1795 return 0; /* not handled */
1796 }
1797 return mv_handle_fbs_ncq_dev_err(ap);
1798 } else {
1799 /*
1800 * EDMA should have self-disabled for this case.
1801 * If it did not, then something is wrong elsewhere,
1802 * and we cannot handle it here.
1803 */
1804 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
1805 ata_port_printk(ap, KERN_WARNING,
1806 "%s: err_cause=0x%x pp_flags=0x%x\n",
1807 __func__, edma_err_cause, pp->pp_flags);
1808 return 0; /* not handled */
1809 }
1810 return mv_handle_fbs_non_ncq_dev_err(ap);
1811 }
1812 return 0; /* not handled */
1813}
1814
a9010329 1815static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
8f767f8a 1816{
8f767f8a 1817 struct ata_eh_info *ehi = &ap->link.eh_info;
a9010329 1818 char *when = "idle";
8f767f8a 1819
8f767f8a 1820 ata_ehi_clear_desc(ehi);
a9010329
ML
1821 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
1822 when = "disabled";
1823 } else if (edma_was_enabled) {
1824 when = "EDMA enabled";
8f767f8a
ML
1825 } else {
1826 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
1827 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
a9010329 1828 when = "polling";
8f767f8a 1829 }
a9010329 1830 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
8f767f8a
ML
1831 ehi->err_mask |= AC_ERR_OTHER;
1832 ehi->action |= ATA_EH_RESET;
1833 ata_port_freeze(ap);
1834}
1835
05b308e1
BR
1836/**
1837 * mv_err_intr - Handle error interrupts on the port
1838 * @ap: ATA channel to manipulate
05b308e1 1840 *
8d07379d
ML
1841 * Most cases require a full reset of the chip's state machine,
1842 * which also performs a COMRESET.
1843 * Also, if the port disabled DMA, update our cached copy to match.
05b308e1
BR
1844 *
1845 * LOCKING:
1846 * Inherited from caller.
1847 */
37b9046a 1848static void mv_err_intr(struct ata_port *ap)
31961943
BR
1849{
1850 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde 1851 u32 edma_err_cause, eh_freeze_mask, serr = 0;
e4006077 1852 u32 fis_cause = 0;
bdd4ddde
JG
1853 struct mv_port_priv *pp = ap->private_data;
1854 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde 1855 unsigned int action = 0, err_mask = 0;
9af5c9c9 1856 struct ata_eh_info *ehi = &ap->link.eh_info;
37b9046a
ML
1857 struct ata_queued_cmd *qc;
1858 int abort = 0;
20f733e7 1859
8d07379d 1860 /*
37b9046a 1861 * Read and clear the SError and err_cause bits.
e4006077
ML
1862 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
1863 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
8d07379d 1864 */
37b9046a
ML
1865 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1866 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1867
bdd4ddde 1868 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
e4006077
ML
1869 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
1870 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1871 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
1872 }
8d07379d 1873 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
bdd4ddde 1874
4c299ca3
ML
1875 if (edma_err_cause & EDMA_ERR_DEV) {
1876 /*
1877 * Device errors during FIS-based switching operation
1878 * require special handling.
1879 */
1880 if (mv_handle_dev_err(ap, edma_err_cause))
1881 return;
1882 }
1883
37b9046a
ML
1884 qc = mv_get_active_qc(ap);
1885 ata_ehi_clear_desc(ehi);
1886 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
1887 edma_err_cause, pp->pp_flags);
e4006077 1888
c443c500 1889 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
e4006077 1890 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
c443c500
ML
1891 if (fis_cause & SATA_FIS_IRQ_AN) {
1892 u32 ec = edma_err_cause &
1893 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
1894 sata_async_notification(ap);
1895 if (!ec)
1896 return; /* Just an AN; no need for the nukes */
1897 ata_ehi_push_desc(ehi, "SDB notify");
1898 }
1899 }
bdd4ddde 1900 /*
352fab70 1901 * All generations share these EDMA error cause bits:
bdd4ddde 1902 */
37b9046a 1903 if (edma_err_cause & EDMA_ERR_DEV) {
bdd4ddde 1904 err_mask |= AC_ERR_DEV;
37b9046a
ML
1905 action |= ATA_EH_RESET;
1906 ata_ehi_push_desc(ehi, "dev error");
1907 }
bdd4ddde 1908 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 1909 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
1910 EDMA_ERR_INTRL_PAR)) {
1911 err_mask |= AC_ERR_ATA_BUS;
cf480626 1912 action |= ATA_EH_RESET;
b64bbc39 1913 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
1914 }
1915 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1916 ata_ehi_hotplugged(ehi);
1917 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 1918 "dev disconnect" : "dev connect");
cf480626 1919 action |= ATA_EH_RESET;
bdd4ddde
JG
1920 }
1921
352fab70
ML
1922 /*
1923 * Gen-I has a different SELF_DIS bit,
1924 * different FREEZE bits, and no SERR bit:
1925 */
ee9ccdf7 1926 if (IS_GEN_I(hpriv)) {
bdd4ddde 1927 eh_freeze_mask = EDMA_EH_FREEZE_5;
bdd4ddde 1928 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
bdd4ddde 1929 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1930 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1931 }
1932 } else {
1933 eh_freeze_mask = EDMA_EH_FREEZE;
bdd4ddde 1934 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
bdd4ddde 1935 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1936 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde 1937 }
bdd4ddde 1938 if (edma_err_cause & EDMA_ERR_SERR) {
8d07379d
ML
1939 ata_ehi_push_desc(ehi, "SError=%08x", serr);
1940 err_mask |= AC_ERR_ATA_BUS;
cf480626 1941 action |= ATA_EH_RESET;
bdd4ddde 1942 }
afb0edd9 1943 }
20f733e7 1944
bdd4ddde
JG
1945 if (!err_mask) {
1946 err_mask = AC_ERR_OTHER;
cf480626 1947 action |= ATA_EH_RESET;
bdd4ddde
JG
1948 }
1949
1950 ehi->serror |= serr;
1951 ehi->action |= action;
1952
1953 if (qc)
1954 qc->err_mask |= err_mask;
1955 else
1956 ehi->err_mask |= err_mask;
1957
37b9046a
ML
1958 if (err_mask == AC_ERR_DEV) {
1959 /*
1960 * Cannot do ata_port_freeze() here,
1961 * because it would kill PIO access,
1962 * which is needed for further diagnosis.
1963 */
1964 mv_eh_freeze(ap);
1965 abort = 1;
1966 } else if (edma_err_cause & eh_freeze_mask) {
1967 /*
1968 * Note to self: ata_port_freeze() calls ata_port_abort()
1969 */
bdd4ddde 1970 ata_port_freeze(ap);
37b9046a
ML
1971 } else {
1972 abort = 1;
1973 }
1974
1975 if (abort) {
1976 if (qc)
1977 ata_link_abort(qc->dev->link);
1978 else
1979 ata_port_abort(ap);
1980 }
bdd4ddde
JG
1981}
1982
fcfb1f77
ML
1983static void mv_process_crpb_response(struct ata_port *ap,
1984 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1985{
1986 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1987
1988 if (qc) {
1989 u8 ata_status;
1990 u16 edma_status = le16_to_cpu(response->flags);
1991 /*
1992 * edma_status from a response queue entry:
1993 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1994 * MSB is saved ATA status from command completion.
1995 */
1996 if (!ncq_enabled) {
1997 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1998 if (err_cause) {
1999 /*
2000 * Error will be seen/handled by mv_err_intr().
2001 * So do nothing at all here.
2002 */
2003 return;
2004 }
2005 }
2006 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
37b9046a
ML
2007 if (!ac_err_mask(ata_status))
2008 ata_qc_complete(qc);
2009 /* else: leave it for mv_err_intr() */
fcfb1f77
ML
2010 } else {
2011 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2012 __func__, tag);
2013 }
2014}
2015
2016static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
bdd4ddde
JG
2017{
2018 void __iomem *port_mmio = mv_ap_base(ap);
2019 struct mv_host_priv *hpriv = ap->host->private_data;
fcfb1f77 2020 u32 in_index;
bdd4ddde 2021 bool work_done = false;
fcfb1f77 2022 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
bdd4ddde 2023
fcfb1f77 2024 /* Get the hardware queue position index */
bdd4ddde
JG
2025 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
2026 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2027
fcfb1f77
ML
2028 	/* Process new responses received since the last time we looked */
2029 while (in_index != pp->resp_idx) {
6c1153e0 2030 unsigned int tag;
fcfb1f77 2031 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
bdd4ddde 2032
fcfb1f77 2033 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
bdd4ddde 2034
fcfb1f77
ML
2035 if (IS_GEN_I(hpriv)) {
2036 /* 50xx: no NCQ, only one command active at a time */
9af5c9c9 2037 tag = ap->link.active_tag;
fcfb1f77
ML
2038 } else {
2039 /* Gen II/IIE: get command tag from CRPB entry */
2040 tag = le16_to_cpu(response->id) & 0x1f;
bdd4ddde 2041 }
fcfb1f77 2042 mv_process_crpb_response(ap, response, tag, ncq_enabled);
bdd4ddde 2043 work_done = true;
bdd4ddde
JG
2044 }
2045
352fab70 2046 /* Update the software queue position index in hardware */
bdd4ddde
JG
2047 if (work_done)
2048 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
fcfb1f77 2049 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
bdd4ddde 2050 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
20f733e7
BR
2051}
2052
a9010329
ML
2053static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2054{
2055 struct mv_port_priv *pp;
2056 int edma_was_enabled;
2057
2058 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2059 mv_unexpected_intr(ap, 0);
2060 return;
2061 }
2062 /*
2063 * Grab a snapshot of the EDMA_EN flag setting,
2064 * so that we have a consistent view for this port,
2065 	 * even if one of the routines we call changes it.
2066 */
2067 pp = ap->private_data;
2068 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2069 /*
2070 * Process completed CRPB response(s) before other events.
2071 */
2072 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2073 mv_process_crpb_entries(ap, pp);
4c299ca3
ML
2074 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2075 mv_handle_fbs_ncq_dev_err(ap);
a9010329
ML
2076 }
2077 /*
2078 * Handle chip-reported errors, or continue on to handle PIO.
2079 */
2080 if (unlikely(port_cause & ERR_IRQ)) {
2081 mv_err_intr(ap);
2082 } else if (!edma_was_enabled) {
2083 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2084 if (qc)
2085 ata_sff_host_intr(ap, qc);
2086 else
2087 mv_unexpected_intr(ap, edma_was_enabled);
2088 }
2089}
2090
05b308e1
BR
2091/**
2092 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 2093 * @host: host specific structure
7368f919 2094 * @main_irq_cause: Main interrupt cause register for the chip.
05b308e1
BR
2095 *
2096 * LOCKING:
2097 * Inherited from caller.
2098 */
7368f919 2099static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
20f733e7 2100{
f351b2d6 2101 struct mv_host_priv *hpriv = host->private_data;
eabd5eb1 2102 void __iomem *mmio = hpriv->base, *hc_mmio;
a3718c1f 2103 unsigned int handled = 0, port;
20f733e7 2104
a3718c1f 2105 for (port = 0; port < hpriv->n_ports; port++) {
cca3974e 2106 struct ata_port *ap = host->ports[port];
eabd5eb1
ML
2107 unsigned int p, shift, hardport, port_cause;
2108
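		/*
		 * "shift" locates this port's DONE/ERR bits within
		 * main_irq_cause; "hardport" is the port's index within
		 * its host controller (hc), so hardport == 0 below means
		 * the first port on that hc.
		 */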
a3718c1f 2109 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
a3718c1f 2110 /*
eabd5eb1
ML
2111 * Each hc within the host has its own hc_irq_cause register,
2112 		 * where the interrupting ports' bits get ack'd.
a3718c1f 2113 */
eabd5eb1
ML
2114 if (hardport == 0) { /* first port on this hc ? */
2115 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2116 u32 port_mask, ack_irqs;
2117 /*
2118 * Skip this entire hc if nothing pending for any ports
2119 */
2120 if (!hc_cause) {
2121 port += MV_PORTS_PER_HC - 1;
2122 continue;
2123 }
2124 /*
2125 * We don't need/want to read the hc_irq_cause register,
2126 * because doing so hurts performance, and
2127 * main_irq_cause already gives us everything we need.
2128 *
2129 * But we do have to *write* to the hc_irq_cause to ack
2130 * the ports that we are handling this time through.
2131 *
2132 * This requires that we create a bitmap for those
2133 * ports which interrupted us, and use that bitmap
2134 * to ack (only) those ports via hc_irq_cause.
2135 */
2136 ack_irqs = 0;
2137 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2138 if ((port + p) >= hpriv->n_ports)
2139 break;
2140 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2141 if (hc_cause & port_mask)
2142 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2143 }
a3718c1f 2144 hc_mmio = mv_hc_base_from_port(mmio, port);
eabd5eb1 2145 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
a3718c1f
ML
2146 handled = 1;
2147 }
8f767f8a 2148 /*
a9010329 2149 * Handle interrupts signalled for this port:
8f767f8a 2150 */
a9010329
ML
2151 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2152 if (port_cause)
2153 mv_port_intr(ap, port_cause);
20f733e7 2154 }
a3718c1f 2155 return handled;
20f733e7
BR
2156}
2157
a3718c1f 2158static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
bdd4ddde 2159{
02a121da 2160 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
2161 struct ata_port *ap;
2162 struct ata_queued_cmd *qc;
2163 struct ata_eh_info *ehi;
2164 unsigned int i, err_mask, printed = 0;
2165 u32 err_cause;
2166
02a121da 2167 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
2168
2169 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2170 err_cause);
2171
2172 DPRINTK("All regs @ PCI error\n");
2173 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2174
02a121da 2175 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
2176
2177 for (i = 0; i < host->n_ports; i++) {
2178 ap = host->ports[i];
936fd732 2179 if (!ata_link_offline(&ap->link)) {
9af5c9c9 2180 ehi = &ap->link.eh_info;
bdd4ddde
JG
2181 ata_ehi_clear_desc(ehi);
2182 if (!printed++)
2183 ata_ehi_push_desc(ehi,
2184 "PCI err cause 0x%08x", err_cause);
2185 err_mask = AC_ERR_HOST_BUS;
cf480626 2186 ehi->action = ATA_EH_RESET;
9af5c9c9 2187 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
2188 if (qc)
2189 qc->err_mask |= err_mask;
2190 else
2191 ehi->err_mask |= err_mask;
2192
2193 ata_port_freeze(ap);
2194 }
2195 }
a3718c1f 2196 return 1; /* handled */
bdd4ddde
JG
2197}
2198
05b308e1 2199/**
c5d3e45a 2200 * mv_interrupt - Main interrupt event handler
05b308e1
BR
2201 * @irq: unused
2202 * @dev_instance: private data; in this case the host structure
05b308e1
BR
2203 *
2204 	 * Read the read-only register to determine if any host
2205 * controllers have pending interrupts. If so, call lower level
2206 * routine to handle. Also check for PCI errors which are only
2207 * reported here.
2208 *
8b260248 2209 * LOCKING:
cca3974e 2210 * This routine holds the host lock while processing pending
05b308e1
BR
2211 * interrupts.
2212 */
7d12e780 2213static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 2214{
cca3974e 2215 struct ata_host *host = dev_instance;
f351b2d6 2216 struct mv_host_priv *hpriv = host->private_data;
a3718c1f 2217 unsigned int handled = 0;
96e2c487 2218 u32 main_irq_cause, pending_irqs;
20f733e7 2219
646a4da5 2220 spin_lock(&host->lock);
7368f919 2221 main_irq_cause = readl(hpriv->main_irq_cause_addr);
96e2c487 2222 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
352fab70
ML
2223 /*
2224 * Deal with cases where we either have nothing pending, or have read
2225 * a bogus register value which can indicate HW removal or PCI fault.
20f733e7 2226 */
a44253d2 2227 if (pending_irqs && main_irq_cause != 0xffffffffU) {
1f398472 2228 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
a3718c1f
ML
2229 handled = mv_pci_error(host, hpriv->base);
2230 else
a44253d2 2231 handled = mv_host_intr(host, pending_irqs);
bdd4ddde 2232 }
cca3974e 2233 spin_unlock(&host->lock);
20f733e7
BR
2234 return IRQ_RETVAL(handled);
2235}
2236
c9d39130
JG
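/*
 * On Gen-I (50xx) parts the SCR registers sit at consecutive u32
 * offsets within the phy block, so a valid SCR index maps to
 * index * sizeof(u32); anything else is rejected with ~0.
 */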
2237static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2238{
2239 unsigned int ofs;
2240
2241 switch (sc_reg_in) {
2242 case SCR_STATUS:
2243 case SCR_ERROR:
2244 case SCR_CONTROL:
2245 ofs = sc_reg_in * sizeof(u32);
2246 break;
2247 default:
2248 ofs = 0xffffffffU;
2249 break;
2250 }
2251 return ofs;
2252}
2253
da3dbb17 2254static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 2255{
f351b2d6
SB
2256 struct mv_host_priv *hpriv = ap->host->private_data;
2257 void __iomem *mmio = hpriv->base;
0d5ff566 2258 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
2259 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2260
da3dbb17
TH
2261 if (ofs != 0xffffffffU) {
2262 *val = readl(addr + ofs);
2263 return 0;
2264 } else
2265 return -EINVAL;
c9d39130
JG
2266}
2267
da3dbb17 2268static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 2269{
f351b2d6
SB
2270 struct mv_host_priv *hpriv = ap->host->private_data;
2271 void __iomem *mmio = hpriv->base;
0d5ff566 2272 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
2273 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2274
da3dbb17 2275 if (ofs != 0xffffffffU) {
0d5ff566 2276 writelfl(val, addr + ofs);
da3dbb17
TH
2277 return 0;
2278 } else
2279 return -EINVAL;
c9d39130
JG
2280}
2281
7bb3c529 2282static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 2283{
7bb3c529 2284 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
2285 int early_5080;
2286
44c10138 2287 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
2288
2289 if (!early_5080) {
2290 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2291 tmp |= (1 << 0);
2292 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2293 }
2294
7bb3c529 2295 mv_reset_pci_bus(host, mmio);
522479fb
JG
2296}
2297
2298static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2299{
8e7decdb 2300 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
522479fb
JG
2301}
2302
47c2b677 2303static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2304 void __iomem *mmio)
2305{
c9d39130
JG
2306 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2307 u32 tmp;
2308
2309 tmp = readl(phy_mmio + MV5_PHY_MODE);
2310
2311 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2312 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
2313}
2314
47c2b677 2315static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2316{
522479fb
JG
2317 u32 tmp;
2318
8e7decdb 2319 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
522479fb
JG
2320
2321 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2322
2323 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2324 tmp |= ~(1 << 0);
2325 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
2326}
2327
2a47ce06
JG
2328static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2329 unsigned int port)
bca1c4eb 2330{
c9d39130
JG
2331 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2332 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2333 u32 tmp;
2334 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2335
2336 if (fix_apm_sq) {
8e7decdb 2337 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
c9d39130 2338 tmp |= (1 << 19);
8e7decdb 2339 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
c9d39130 2340
8e7decdb 2341 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
c9d39130
JG
2342 tmp &= ~0x3;
2343 tmp |= 0x1;
8e7decdb 2344 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
c9d39130
JG
2345 }
2346
2347 tmp = readl(phy_mmio + MV5_PHY_MODE);
2348 tmp &= ~mask;
2349 tmp |= hpriv->signal[port].pre;
2350 tmp |= hpriv->signal[port].amps;
2351 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
2352}
2353
c9d39130
JG
2354
2355#undef ZERO
2356#define ZERO(reg) writel(0, port_mmio + (reg))
2357static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
2358 unsigned int port)
2359{
2360 void __iomem *port_mmio = mv_port_base(mmio, port);
2361
e12bef50 2362 mv_reset_channel(hpriv, mmio, port);
c9d39130
JG
2363
2364 ZERO(0x028); /* command */
2365 writel(0x11f, port_mmio + EDMA_CFG_OFS);
2366 ZERO(0x004); /* timer */
2367 ZERO(0x008); /* irq err cause */
2368 ZERO(0x00c); /* irq err mask */
2369 ZERO(0x010); /* rq bah */
2370 ZERO(0x014); /* rq inp */
2371 ZERO(0x018); /* rq outp */
2372 ZERO(0x01c); /* respq bah */
2373 ZERO(0x024); /* respq outp */
2374 ZERO(0x020); /* respq inp */
2375 ZERO(0x02c); /* test control */
8e7decdb 2376 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
c9d39130
JG
2377}
2378#undef ZERO
2379
2380#define ZERO(reg) writel(0, hc_mmio + (reg))
2381static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2382 unsigned int hc)
47c2b677 2383{
c9d39130
JG
2384 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2385 u32 tmp;
2386
2387 ZERO(0x00c);
2388 ZERO(0x010);
2389 ZERO(0x014);
2390 ZERO(0x018);
2391
2392 tmp = readl(hc_mmio + 0x20);
2393 tmp &= 0x1c1c1c1c;
2394 tmp |= 0x03030303;
2395 writel(tmp, hc_mmio + 0x20);
2396}
2397#undef ZERO
2398
2399static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2400 unsigned int n_hc)
2401{
2402 unsigned int hc, port;
2403
2404 for (hc = 0; hc < n_hc; hc++) {
2405 for (port = 0; port < MV_PORTS_PER_HC; port++)
2406 mv5_reset_hc_port(hpriv, mmio,
2407 (hc * MV_PORTS_PER_HC) + port);
2408
2409 mv5_reset_one_hc(hpriv, mmio, hc);
2410 }
2411
2412 return 0;
47c2b677
JG
2413}
2414
101ffae2
JG
2415#undef ZERO
2416#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2417static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2418{
02a121da 2419 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
2420 u32 tmp;
2421
8e7decdb 2422 tmp = readl(mmio + MV_PCI_MODE_OFS);
101ffae2 2423 tmp &= 0xff00ffff;
8e7decdb 2424 writel(tmp, mmio + MV_PCI_MODE_OFS);
101ffae2
JG
2425
2426 ZERO(MV_PCI_DISC_TIMER);
2427 ZERO(MV_PCI_MSI_TRIGGER);
8e7decdb 2428 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
101ffae2 2429 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2430 ZERO(hpriv->irq_cause_ofs);
2431 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2432 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2433 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2434 ZERO(MV_PCI_ERR_ATTRIBUTE);
2435 ZERO(MV_PCI_ERR_COMMAND);
2436}
2437#undef ZERO
2438
2439static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2440{
2441 u32 tmp;
2442
2443 mv5_reset_flash(hpriv, mmio);
2444
8e7decdb 2445 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
101ffae2
JG
2446 tmp &= 0x3;
2447 tmp |= (1 << 5) | (1 << 6);
8e7decdb 2448 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
101ffae2
JG
2449}
2450
2451/**
2452 * mv6_reset_hc - Perform the 6xxx global soft reset
2453 * @mmio: base address of the HBA
2454 *
2455 * This routine only applies to 6xxx parts.
2456 *
2457 * LOCKING:
2458 * Inherited from caller.
2459 */
c9d39130
JG
2460static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2461 unsigned int n_hc)
101ffae2
JG
2462{
2463 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2464 int i, rc = 0;
2465 u32 t;
2466
2467 /* Following procedure defined in PCI "main command and status
2468 * register" table.
2469 */
2470 t = readl(reg);
2471 writel(t | STOP_PCI_MASTER, reg);
2472
2473 for (i = 0; i < 1000; i++) {
2474 udelay(1);
2475 t = readl(reg);
2dcb407e 2476 if (PCI_MASTER_EMPTY & t)
101ffae2 2477 break;
101ffae2
JG
2478 }
2479 if (!(PCI_MASTER_EMPTY & t)) {
2480 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2481 rc = 1;
2482 goto done;
2483 }
2484
2485 /* set reset */
2486 i = 5;
2487 do {
2488 writel(t | GLOB_SFT_RST, reg);
2489 t = readl(reg);
2490 udelay(1);
2491 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2492
2493 if (!(GLOB_SFT_RST & t)) {
2494 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2495 rc = 1;
2496 goto done;
2497 }
2498
2499 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2500 i = 5;
2501 do {
2502 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2503 t = readl(reg);
2504 udelay(1);
2505 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2506
2507 if (GLOB_SFT_RST & t) {
2508 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2509 rc = 1;
2510 }
2511done:
2512 return rc;
2513}
2514
47c2b677 2515static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2516 void __iomem *mmio)
2517{
2518 void __iomem *port_mmio;
2519 u32 tmp;
2520
8e7decdb 2521 tmp = readl(mmio + MV_RESET_CFG_OFS);
ba3fe8fb 2522 if ((tmp & (1 << 0)) == 0) {
47c2b677 2523 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2524 hpriv->signal[idx].pre = 0x1 << 5;
2525 return;
2526 }
2527
2528 port_mmio = mv_port_base(mmio, idx);
2529 tmp = readl(port_mmio + PHY_MODE2);
2530
2531 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2532 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2533}
2534
47c2b677 2535static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2536{
8e7decdb 2537 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
ba3fe8fb
JG
2538}
2539
c9d39130 2540static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2541 unsigned int port)
bca1c4eb 2542{
c9d39130
JG
2543 void __iomem *port_mmio = mv_port_base(mmio, port);
2544
bca1c4eb 2545 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2546 int fix_phy_mode2 =
2547 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2548 int fix_phy_mode4 =
47c2b677 2549 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
8c30a8b9 2550 u32 m2, m3;
47c2b677
JG
2551
2552 if (fix_phy_mode2) {
2553 m2 = readl(port_mmio + PHY_MODE2);
2554 m2 &= ~(1 << 16);
2555 m2 |= (1 << 31);
2556 writel(m2, port_mmio + PHY_MODE2);
2557
2558 udelay(200);
2559
2560 m2 = readl(port_mmio + PHY_MODE2);
2561 m2 &= ~((1 << 16) | (1 << 31));
2562 writel(m2, port_mmio + PHY_MODE2);
2563
2564 udelay(200);
2565 }
2566
8c30a8b9
ML
2567 /*
2568 * Gen-II/IIe PHY_MODE3 errata RM#2:
2569 * Achieves better receiver noise performance than the h/w default:
2570 */
2571 m3 = readl(port_mmio + PHY_MODE3);
2572 m3 = (m3 & 0x1f) | (0x5555601 << 5);
bca1c4eb 2573
0388a8c0
ML
2574 /* Guideline 88F5182 (GL# SATA-S11) */
2575 if (IS_SOC(hpriv))
2576 m3 &= ~0x1c;
2577
bca1c4eb 2578 if (fix_phy_mode4) {
ba069e37
ML
2579 u32 m4 = readl(port_mmio + PHY_MODE4);
2580 /*
2581 * Enforce reserved-bit restrictions on GenIIe devices only.
2582 * For earlier chipsets, force only the internal config field
2583 * (workaround for errata FEr SATA#10 part 1).
2584 */
8c30a8b9 2585 if (IS_GEN_IIE(hpriv))
ba069e37
ML
2586 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
2587 else
2588 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
8c30a8b9 2589 writel(m4, port_mmio + PHY_MODE4);
bca1c4eb 2590 }
b406c7a6
ML
2591 /*
2592 * Workaround for 60x1-B2 errata SATA#13:
2593 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
2594 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
2595 */
2596 writel(m3, port_mmio + PHY_MODE3);
bca1c4eb
JG
2597
2598 /* Revert values of pre-emphasis and signal amps to the saved ones */
2599 m2 = readl(port_mmio + PHY_MODE2);
2600
2601 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2602 m2 |= hpriv->signal[port].amps;
2603 m2 |= hpriv->signal[port].pre;
47c2b677 2604 m2 &= ~(1 << 16);
bca1c4eb 2605
e4e7b892
JG
2606 /* according to mvSata 3.6.1, some IIE values are fixed */
2607 if (IS_GEN_IIE(hpriv)) {
2608 m2 &= ~0xC30FF01F;
2609 m2 |= 0x0000900F;
2610 }
2611
bca1c4eb
JG
2612 writel(m2, port_mmio + PHY_MODE2);
2613}
2614
f351b2d6
SB
2615/* TODO: use the generic LED interface to configure the SATA Presence */
2616/* & Activity LEDs on the board */
2617static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2618 void __iomem *mmio)
2619{
2620 return;
2621}
2622
2623static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2624 void __iomem *mmio)
2625{
2626 void __iomem *port_mmio;
2627 u32 tmp;
2628
2629 port_mmio = mv_port_base(mmio, idx);
2630 tmp = readl(port_mmio + PHY_MODE2);
2631
2632 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2633 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2634}
2635
2636#undef ZERO
2637#define ZERO(reg) writel(0, port_mmio + (reg))
2638static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2639 void __iomem *mmio, unsigned int port)
2640{
2641 void __iomem *port_mmio = mv_port_base(mmio, port);
2642
e12bef50 2643 mv_reset_channel(hpriv, mmio, port);
f351b2d6
SB
2644
2645 ZERO(0x028); /* command */
2646 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2647 ZERO(0x004); /* timer */
2648 ZERO(0x008); /* irq err cause */
2649 ZERO(0x00c); /* irq err mask */
2650 ZERO(0x010); /* rq bah */
2651 ZERO(0x014); /* rq inp */
2652 ZERO(0x018); /* rq outp */
2653 ZERO(0x01c); /* respq bah */
2654 ZERO(0x024); /* respq outp */
2655 ZERO(0x020); /* respq inp */
2656 ZERO(0x02c); /* test control */
8e7decdb 2657 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
f351b2d6
SB
2658}
2659
2660#undef ZERO
2661
2662#define ZERO(reg) writel(0, hc_mmio + (reg))
2663static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2664 void __iomem *mmio)
2665{
2666 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2667
2668 ZERO(0x00c);
2669 ZERO(0x010);
2670 ZERO(0x014);
2671
2672}
2673
2674#undef ZERO
2675
2676static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2677 void __iomem *mmio, unsigned int n_hc)
2678{
2679 unsigned int port;
2680
2681 for (port = 0; port < hpriv->n_ports; port++)
2682 mv_soc_reset_hc_port(hpriv, mmio, port);
2683
2684 mv_soc_reset_one_hc(hpriv, mmio);
2685
2686 return 0;
2687}
2688
2689static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2690 void __iomem *mmio)
2691{
2692 return;
2693}
2694
2695static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2696{
2697 return;
2698}
2699
8e7decdb 2700static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
b67a1064 2701{
8e7decdb 2702 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
b67a1064 2703
8e7decdb 2704 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
b67a1064 2705 if (want_gen2i)
8e7decdb
ML
2706 ifcfg |= (1 << 7); /* enable gen2i speed */
2707 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
b67a1064
ML
2708}
2709
e12bef50 2710static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
c9d39130
JG
2711 unsigned int port_no)
2712{
2713 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2714
8e7decdb
ML
2715 /*
2716 * The datasheet warns against setting EDMA_RESET when EDMA is active
2717 * (but doesn't say what the problem might be). So we first try
2718 * to disable the EDMA engine before doing the EDMA_RESET operation.
2719 */
0d8be5cb 2720 mv_stop_edma_engine(port_mmio);
8e7decdb 2721 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
c9d39130 2722
b67a1064 2723 if (!IS_GEN_I(hpriv)) {
8e7decdb
ML
2724 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
2725 mv_setup_ifcfg(port_mmio, 1);
c9d39130 2726 }
b67a1064 2727 /*
8e7decdb 2728 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
b67a1064
ML
2729 * link, and physical layers. It resets all SATA interface registers
2730 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
c9d39130 2731 */
8e7decdb 2732 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
b67a1064 2733 udelay(25); /* allow reset propagation */
c9d39130
JG
2734 writelfl(0, port_mmio + EDMA_CMD_OFS);
2735
2736 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2737
ee9ccdf7 2738 if (IS_GEN_I(hpriv))
c9d39130
JG
2739 mdelay(1);
2740}
2741
e49856d8 2742static void mv_pmp_select(struct ata_port *ap, int pmp)
20f733e7 2743{
e49856d8
ML
2744 if (sata_pmp_supported(ap)) {
2745 void __iomem *port_mmio = mv_ap_base(ap);
2746 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2747 int old = reg & 0xf;
22374677 2748
e49856d8
ML
2749 if (old != pmp) {
2750 reg = (reg & ~0xf) | pmp;
2751 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2752 }
22374677 2753 }
20f733e7
BR
2754}
2755
e49856d8
ML
2756static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2757 unsigned long deadline)
22374677 2758{
e49856d8
ML
2759 mv_pmp_select(link->ap, sata_srst_pmp(link));
2760 return sata_std_hardreset(link, class, deadline);
2761}
bdd4ddde 2762
e49856d8
ML
2763static int mv_softreset(struct ata_link *link, unsigned int *class,
2764 unsigned long deadline)
2765{
2766 mv_pmp_select(link->ap, sata_srst_pmp(link));
2767 return ata_sff_softreset(link, class, deadline);
22374677
JG
2768}
2769
cc0680a5 2770static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2771 unsigned long deadline)
31961943 2772{
cc0680a5 2773 struct ata_port *ap = link->ap;
bdd4ddde 2774 struct mv_host_priv *hpriv = ap->host->private_data;
b562468c 2775 struct mv_port_priv *pp = ap->private_data;
f351b2d6 2776 void __iomem *mmio = hpriv->base;
0d8be5cb
ML
2777 int rc, attempts = 0, extra = 0;
2778 u32 sstatus;
2779 bool online;
31961943 2780
e12bef50 2781 mv_reset_channel(hpriv, mmio, ap->port_no);
b562468c 2782 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
bdd4ddde 2783
0d8be5cb
ML
2784 /* Workaround for errata FEr SATA#10 (part 2) */
2785 do {
17c5aab5
ML
2786 const unsigned long *timing =
2787 sata_ehc_deb_timing(&link->eh_context);
bdd4ddde 2788
17c5aab5
ML
2789 rc = sata_link_hardreset(link, timing, deadline + extra,
2790 &online, NULL);
9dcffd99 2791 rc = online ? -EAGAIN : rc;
17c5aab5 2792 if (rc)
0d8be5cb 2793 return rc;
0d8be5cb
ML
2794 sata_scr_read(link, SCR_STATUS, &sstatus);
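		/*
		 * An SStatus of 0x121 means DET=1: a device is detected
		 * but phy communication was not established.  Seeing it
		 * repeatedly suggests speed negotiation failed, so force
		 * the slower link speed below and retry.
		 */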
2795 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2796 /* Force 1.5gb/s link speed and try again */
8e7decdb 2797 mv_setup_ifcfg(mv_ap_base(ap), 0);
0d8be5cb
ML
2798 if (time_after(jiffies + HZ, deadline))
2799 extra = HZ; /* only extend it once, max */
2800 }
2801 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
bdd4ddde 2802
17c5aab5 2803 return rc;
bdd4ddde
JG
2804}
2805
bdd4ddde
JG
2806static void mv_eh_freeze(struct ata_port *ap)
2807{
1cfd19ae 2808 mv_stop_edma(ap);
c4de573b 2809 mv_enable_port_irqs(ap, 0);
bdd4ddde
JG
2810}
2811
2812static void mv_eh_thaw(struct ata_port *ap)
2813{
f351b2d6 2814 struct mv_host_priv *hpriv = ap->host->private_data;
c4de573b
ML
2815 unsigned int port = ap->port_no;
2816 unsigned int hardport = mv_hardport_from_port(port);
1cfd19ae 2817 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
bdd4ddde 2818 void __iomem *port_mmio = mv_ap_base(ap);
c4de573b 2819 u32 hc_irq_cause;
bdd4ddde 2820
bdd4ddde
JG
2821 /* clear EDMA errors on this port */
2822 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2823
2824 /* clear pending irq events */
2825 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1cfd19ae
ML
2826 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2827 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde 2828
88e675e1 2829 mv_enable_port_irqs(ap, ERR_IRQ);
31961943
BR
2830}
2831
05b308e1
BR
2832/**
2833 * mv_port_init - Perform some early initialization on a single port.
2834 * @port: libata data structure storing shadow register addresses
2835 * @port_mmio: base address of the port
2836 *
2837 * Initialize shadow register mmio addresses, clear outstanding
2838 * interrupts on the port, and unmask interrupts for the future
2839 * start of the port.
2840 *
2841 * LOCKING:
2842 * Inherited from caller.
2843 */
31961943 2844static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2845{
0d5ff566 2846 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2847 unsigned serr_ofs;
2848
8b260248 2849 /* PIO related setup
31961943
BR
2850 */
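	/* The ATA shadow registers are memory-mapped at consecutive
	 * 32-bit strides from SHD_BLK_OFS, hence the sizeof(u32) scaling
	 * in each address calculation below.
	 */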
2851 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2852 port->error_addr =
31961943
BR
2853 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2854 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2855 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2856 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2857 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2858 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2859 port->status_addr =
31961943
BR
2860 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2861 /* special case: control/altstatus doesn't have ATA_REG_ address */
2862 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2863
2864 /* unused: */
8d9db2d2 2865 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2866
31961943
BR
2867 /* Clear any currently outstanding port interrupt conditions */
2868 serr_ofs = mv_scr_offset(SCR_ERROR);
2869 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2870 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2871
646a4da5
ML
2872 /* unmask all non-transient EDMA error interrupts */
2873 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2874
8b260248 2875 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2876 readl(port_mmio + EDMA_CFG_OFS),
2877 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2878 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2879}
2880
616d4a98
ML
2881static unsigned int mv_in_pcix_mode(struct ata_host *host)
2882{
2883 struct mv_host_priv *hpriv = host->private_data;
2884 void __iomem *mmio = hpriv->base;
2885 u32 reg;
2886
1f398472 2887 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
616d4a98
ML
2888 return 0; /* not PCI-X capable */
2889 reg = readl(mmio + MV_PCI_MODE_OFS);
2890 if ((reg & MV_PCI_MODE_MASK) == 0)
2891 return 0; /* conventional PCI mode */
2892 return 1; /* chip is in PCI-X mode */
2893}
2894
2895static int mv_pci_cut_through_okay(struct ata_host *host)
2896{
2897 struct mv_host_priv *hpriv = host->private_data;
2898 void __iomem *mmio = hpriv->base;
2899 u32 reg;
2900
2901 if (!mv_in_pcix_mode(host)) {
2902 reg = readl(mmio + PCI_COMMAND_OFS);
2903 if (reg & PCI_COMMAND_MRDTRIG)
2904 return 0; /* not okay */
2905 }
2906 return 1; /* okay */
2907}
2908
4447d351 2909static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2910{
4447d351
TH
2911 struct pci_dev *pdev = to_pci_dev(host->dev);
2912 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2913 u32 hp_flags = hpriv->hp_flags;
2914
5796d1c4 2915 switch (board_idx) {
47c2b677
JG
2916 case chip_5080:
2917 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2918 hp_flags |= MV_HP_GEN_I;
47c2b677 2919
44c10138 2920 switch (pdev->revision) {
47c2b677
JG
2921 case 0x1:
2922 hp_flags |= MV_HP_ERRATA_50XXB0;
2923 break;
2924 case 0x3:
2925 hp_flags |= MV_HP_ERRATA_50XXB2;
2926 break;
2927 default:
2928 dev_printk(KERN_WARNING, &pdev->dev,
2929 "Applying 50XXB2 workarounds to unknown rev\n");
2930 hp_flags |= MV_HP_ERRATA_50XXB2;
2931 break;
2932 }
2933 break;
2934
bca1c4eb
JG
2935 case chip_504x:
2936 case chip_508x:
47c2b677 2937 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2938 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2939
44c10138 2940 switch (pdev->revision) {
47c2b677
JG
2941 case 0x0:
2942 hp_flags |= MV_HP_ERRATA_50XXB0;
2943 break;
2944 case 0x3:
2945 hp_flags |= MV_HP_ERRATA_50XXB2;
2946 break;
2947 default:
2948 dev_printk(KERN_WARNING, &pdev->dev,
2949 "Applying B2 workarounds to unknown rev\n");
2950 hp_flags |= MV_HP_ERRATA_50XXB2;
2951 break;
bca1c4eb
JG
2952 }
2953 break;
2954
2955 case chip_604x:
2956 case chip_608x:
47c2b677 2957 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2958 hp_flags |= MV_HP_GEN_II;
47c2b677 2959
44c10138 2960 switch (pdev->revision) {
47c2b677
JG
2961 case 0x7:
2962 hp_flags |= MV_HP_ERRATA_60X1B2;
2963 break;
2964 case 0x9:
2965 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2966 break;
2967 default:
2968 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2969 "Applying B2 workarounds to unknown rev\n");
2970 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2971 break;
2972 }
2973 break;
2974
e4e7b892 2975 case chip_7042:
616d4a98 2976 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
306b30f7
ML
2977 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2978 (pdev->device == 0x2300 || pdev->device == 0x2310))
2979 {
4e520033
ML
2980 /*
2981 * Highpoint RocketRAID PCIe 23xx series cards:
2982 *
2983 * Unconfigured drives are treated as "Legacy"
2984 * by the BIOS, and it overwrites sector 8 with
2985 * a "Lgcy" metadata block prior to Linux boot.
2986 *
2987 * Configured drives (RAID or JBOD) leave sector 8
2988 * alone, but instead overwrite a high numbered
2989 * sector for the RAID metadata. This sector can
2990 * be determined exactly, by truncating the physical
2991 * drive capacity to a nice even GB value.
2992 *
2993 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2994 *
2995 * Warn the user, lest they think we're just buggy.
2996 */
2997 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2998 " BIOS CORRUPTS DATA on all attached drives,"
2999 " regardless of if/how they are configured."
3000 " BEWARE!\n");
3001 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3002 " use sectors 8-9 on \"Legacy\" drives,"
3003 " and avoid the final two gigabytes on"
3004 " all RocketRAID BIOS initialized drives.\n");
306b30f7 3005 }
8e7decdb 3006 /* drop through */
e4e7b892
JG
3007 case chip_6042:
3008 hpriv->ops = &mv6xxx_ops;
e4e7b892 3009 hp_flags |= MV_HP_GEN_IIE;
616d4a98
ML
3010 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3011 hp_flags |= MV_HP_CUT_THROUGH;
e4e7b892 3012
44c10138 3013 switch (pdev->revision) {
5cf73bfb 3014 case 0x2: /* Rev.B0: the first/only public release */
e4e7b892
JG
3015 hp_flags |= MV_HP_ERRATA_60X1C0;
3016 break;
3017 default:
3018 dev_printk(KERN_WARNING, &pdev->dev,
3019 "Applying 60X1C0 workarounds to unknown rev\n");
3020 hp_flags |= MV_HP_ERRATA_60X1C0;
3021 break;
3022 }
3023 break;
f351b2d6
SB
3024 case chip_soc:
3025 hpriv->ops = &mv_soc_ops;
eb3a55a9
SB
3026 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3027 MV_HP_ERRATA_60X1C0;
f351b2d6 3028 break;
e4e7b892 3029
bca1c4eb 3030 default:
f351b2d6 3031 dev_printk(KERN_ERR, host->dev,
5796d1c4 3032 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
3033 return 1;
3034 }
3035
3036 hpriv->hp_flags = hp_flags;
02a121da
ML
3037 if (hp_flags & MV_HP_PCIE) {
3038 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
3039 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
3040 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3041 } else {
3042 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
3043 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
3044 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3045 }
bca1c4eb
JG
3046
3047 return 0;
3048}
3049
05b308e1 3050/**
47c2b677 3051 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
3052 * @host: ATA host to initialize
3053 * @board_idx: controller index
05b308e1
BR
3054 *
3055 * If possible, do an early global reset of the host. Then do
3056 * our port init and clear/unmask all/relevant host interrupts.
3057 *
3058 * LOCKING:
3059 * Inherited from caller.
3060 */
4447d351 3061static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
3062{
3063 int rc = 0, n_hc, port, hc;
4447d351 3064 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 3065 void __iomem *mmio = hpriv->base;
47c2b677 3066
4447d351 3067 rc = mv_chip_id(host, board_idx);
bca1c4eb 3068 if (rc)
352fab70 3069 goto done;
f351b2d6 3070
1f398472 3071 if (IS_SOC(hpriv)) {
7368f919
ML
3072 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
3073 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
1f398472
ML
3074 } else {
3075 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3076 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
f351b2d6 3077 }
352fab70
ML
3078
3079 /* global interrupt mask: 0 == mask everything */
c4de573b 3080 mv_set_main_irq_mask(host, ~0, 0);
bca1c4eb 3081
4447d351 3082 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 3083
4447d351 3084 for (port = 0; port < host->n_ports; port++)
47c2b677 3085 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 3086
c9d39130 3087 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 3088 if (rc)
20f733e7 3089 goto done;
20f733e7 3090
522479fb 3091 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 3092 hpriv->ops->reset_bus(host, mmio);
47c2b677 3093 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 3094
4447d351 3095 for (port = 0; port < host->n_ports; port++) {
cbcdd875 3096 struct ata_port *ap = host->ports[port];
2a47ce06 3097 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
3098
3099 mv_port_init(&ap->ioaddr, port_mmio);
3100
7bb3c529 3101#ifdef CONFIG_PCI
1f398472 3102 if (!IS_SOC(hpriv)) {
f351b2d6
SB
3103 unsigned int offset = port_mmio - mmio;
3104 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
3105 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
3106 }
7bb3c529 3107#endif
20f733e7
BR
3108 }
3109
3110 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
3111 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3112
3113 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3114 "(before clear)=0x%08x\n", hc,
3115 readl(hc_mmio + HC_CFG_OFS),
3116 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
3117
3118 /* Clear any currently outstanding hc interrupt conditions */
3119 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
3120 }
3121
1f398472 3122 if (!IS_SOC(hpriv)) {
f351b2d6
SB
3123 /* Clear any currently outstanding host interrupt conditions */
3124 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 3125
f351b2d6
SB
3126 /* and unmask interrupt generation for host regs */
3127 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
51de32d2
ML
3128
3129 /*
3130 * enable only global host interrupts for now.
3131 * The per-port interrupts get done later as ports are set up.
3132 */
c4de573b 3133 mv_set_main_irq_mask(host, 0, PCI_ERR);
f351b2d6
SB
3134 }
3135done:
3136 return rc;
3137}
fb621e2f 3138
fbf14e2f
BB
3139static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3140{
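	/*
	 * dmam_pool_create() is the device-managed (devres) variant, so
	 * any pool created before a later failure here is released
	 * automatically on driver detach; no explicit cleanup is needed
	 * in the error paths below.
	 */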
3141 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3142 MV_CRQB_Q_SZ, 0);
3143 if (!hpriv->crqb_pool)
3144 return -ENOMEM;
3145
3146 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3147 MV_CRPB_Q_SZ, 0);
3148 if (!hpriv->crpb_pool)
3149 return -ENOMEM;
3150
3151 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3152 MV_SG_TBL_SZ, 0);
3153 if (!hpriv->sg_tbl_pool)
3154 return -ENOMEM;
3155
3156 return 0;
3157}
3158
15a32632
LB
3159static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3160 struct mbus_dram_target_info *dram)
3161{
3162 int i;
3163
3164 for (i = 0; i < 4; i++) {
3165 writel(0, hpriv->base + WINDOW_CTRL(i));
3166 writel(0, hpriv->base + WINDOW_BASE(i));
3167 }
3168
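	/*
	 * One window per DRAM chip-select: the upper halfword of the
	 * CTRL register is a size mask (for example, a 256MB window
	 * gives (0x10000000 - 1) & 0xffff0000 == 0x0fff0000), followed
	 * by the mbus attribute, the target id, and bit 0 as the
	 * window enable.
	 */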
3169 for (i = 0; i < dram->num_cs; i++) {
3170 struct mbus_dram_window *cs = dram->cs + i;
3171
3172 writel(((cs->size - 1) & 0xffff0000) |
3173 (cs->mbus_attr << 8) |
3174 (dram->mbus_dram_target_id << 4) | 1,
3175 hpriv->base + WINDOW_CTRL(i));
3176 writel(cs->base, hpriv->base + WINDOW_BASE(i));
3177 }
3178}
3179
f351b2d6
SB
3180/**
3181  * mv_platform_probe - handle a positive probe of an SoC Marvell
3182 * host
3183 * @pdev: platform device found
3184 *
3185 * LOCKING:
3186 * Inherited from caller.
3187 */
3188static int mv_platform_probe(struct platform_device *pdev)
3189{
3190 static int printed_version;
3191 const struct mv_sata_platform_data *mv_platform_data;
3192 const struct ata_port_info *ppi[] =
3193 { &mv_port_info[chip_soc], NULL };
3194 struct ata_host *host;
3195 struct mv_host_priv *hpriv;
3196 struct resource *res;
3197 int n_ports, rc;
20f733e7 3198
f351b2d6
SB
3199 if (!printed_version++)
3200 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 3201
f351b2d6
SB
3202 /*
3203 * Simple resource validation ..
3204 */
3205 if (unlikely(pdev->num_resources != 2)) {
3206 dev_err(&pdev->dev, "invalid number of resources\n");
3207 return -EINVAL;
3208 }
3209
3210 /*
3211 * Get the register base first
3212 */
3213 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3214 if (res == NULL)
3215 return -EINVAL;
3216
3217 /* allocate host */
3218 mv_platform_data = pdev->dev.platform_data;
3219 n_ports = mv_platform_data->n_ports;
3220
3221 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3222 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3223
3224 if (!host || !hpriv)
3225 return -ENOMEM;
3226 host->private_data = hpriv;
3227 hpriv->n_ports = n_ports;
3228
3229 host->iomap = NULL;
f1cb0ea1
SB
3230 hpriv->base = devm_ioremap(&pdev->dev, res->start,
3231 res->end - res->start + 1);
f351b2d6
SB
3232 hpriv->base -= MV_SATAHC0_REG_BASE;
3233
15a32632
LB
3234 /*
3235 * (Re-)program MBUS remapping windows if we are asked to.
3236 */
3237 if (mv_platform_data->dram != NULL)
3238 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
3239
fbf14e2f
BB
3240 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3241 if (rc)
3242 return rc;
3243
f351b2d6
SB
3244 /* initialize adapter */
3245 rc = mv_init_host(host, chip_soc);
3246 if (rc)
3247 return rc;
3248
3249 dev_printk(KERN_INFO, &pdev->dev,
3250 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
3251 host->n_ports);
3252
3253 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
3254 IRQF_SHARED, &mv6_sht);
3255}
3256
3257/*
3258 *
3259 * mv_platform_remove - unplug a platform interface
3260 * @pdev: platform device
3261 *
3262 * A platform bus SATA device has been unplugged. Perform the needed
3263 * cleanup. Also called on module unload for any active devices.
3264 */
3265static int __devexit mv_platform_remove(struct platform_device *pdev)
3266{
3267 struct device *dev = &pdev->dev;
3268 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
3269
3270 ata_host_detach(host);
f351b2d6 3271 return 0;
20f733e7
BR
3272}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI MSI; either zero (off, default) or non-zero (on) */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;
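
	/*
	 * Try full 64-bit DMA first: if the coherent (consistent) mask
	 * cannot also be set to 64 bits, keep 64-bit streaming DMA but
	 * fall back to a 32-bit coherent mask; if 64-bit fails outright,
	 * use 32-bit masks for both.
	 */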
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI subclass (SCC) byte to report how the chip
	 * identifies itself: as a SCSI or as a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
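
/*
 * Illustrative example of the line printed above (values hypothetical):
 *
 *	sata_mv 0000:02:00.0: Gen-IIE 32 slots 4 ports SCSI mode IRQ via INTx
 */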

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
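	/* If MSI was requested but cannot be enabled, fall back to
	 * legacy INTx interrupts.
	 */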
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif
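
/*
 * Example usage (illustrative): load the module with MSI enabled:
 *
 *	modprobe sata_mv msi=1
 */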

module_init(mv_init);
module_exit(mv_exit);