/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "1.28"

/*
 * module options
 */

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
                 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
                 "IRQ coalescing time threshold in usecs");

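/*
 * Usage note (illustrative, not part of the original source): coalescing
 * engages only when both thresholds are non-zero, e.g.
 *
 *     modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * Leaving either value at its default of 0 disables IRQ coalescing
 * (see mv_set_irq_coalescing() below).
 */
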
enum {
        /* BAR's are enumerated in terms of pci_resource_start() terms */
        MV_PRIMARY_BAR = 0,     /* offset 0x10: memory space */
        MV_IO_BAR = 2,          /* offset 0x18: IO space */
        MV_MISC_BAR = 3,        /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
        MV_MINOR_REG_AREA_SZ = 0x2000,  /* 8KB */

        /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
        COAL_CLOCKS_PER_USEC = 150,     /* for calculating COAL_TIMEs */
        MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1),      /* internal clocks count */
        MAX_COAL_IO_COUNT = 255,        /* completed I/O count */

        MV_PCI_REG_BASE = 0,

        /*
         * Per-chip ("all ports") interrupt coalescing feature.
         * This is only for GEN_II / GEN_IIE hardware.
         *
         * Coalescing defers the interrupt until either the IO_THRESHOLD
         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
         */
        COAL_REG_BASE = 0x18000,
        IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
        ALL_PORTS_COAL_IRQ = (1 << 4),  /* all ports irq event */

        IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
        IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),

        /*
         * Registers for the (unused here) transaction coalescing feature:
         */
        TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
        TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),

        SATAHC0_REG_BASE = 0x20000,
        FLASH_CTL = 0x1046c,
        GPIO_PORT_CTL = 0x104f0,
        RESET_CFG = 0x180d8,

        MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,  /* arbiter */
        MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH = 32,
        MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT = 256,
        MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

        /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
        MV_PORT_HC_SHIFT = 2,
        MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT),      /* 4 */
        /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
        MV_PORT_MASK = (MV_PORTS_PER_HC - 1),   /* 3 */

        /* Host Flags */
        MV_FLAG_DUAL_HC = (1 << 30),    /* two SATA Host Controllers */

        MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

        MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

        MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
                          ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

        MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,

        CRQB_FLAG_READ = (1 << 0),
        CRQB_TAG_SHIFT = 1,
        CRQB_IOID_SHIFT = 6,    /* CRQB Gen-II/IIE IO Id shift */
        CRQB_PMP_SHIFT = 12,    /* CRQB Gen-II/IIE PMP shift */
        CRQB_HOSTQ_SHIFT = 17,  /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT = 8,
        CRQB_CMD_CS = (0x2 << 11),
        CRQB_CMD_LAST = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT = 8,
        CRPB_IOID_SHIFT_6 = 5,  /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7 = 7,  /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL = (1 << 31),

        /* PCI interface registers */

        MV_PCI_COMMAND = 0xc00,
        MV_PCI_COMMAND_MWRCOM = (1 << 4),       /* PCI Master Write Combining */
        MV_PCI_COMMAND_MRDTRIG = (1 << 7),      /* PCI Master Read Trigger */

        PCI_MAIN_CMD_STS = 0xd30,
        STOP_PCI_MASTER = (1 << 2),
        PCI_MASTER_EMPTY = (1 << 3),
        GLOB_SFT_RST = (1 << 4),

        MV_PCI_MODE = 0xd00,
        MV_PCI_MODE_MASK = 0x30,

        MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
        MV_PCI_DISC_TIMER = 0xd04,
        MV_PCI_MSI_TRIGGER = 0xc38,
        MV_PCI_SERR_MASK = 0xc28,
        MV_PCI_XBAR_TMOUT = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE = 0x1d48,
        MV_PCI_ERR_COMMAND = 0x1d50,

        PCI_IRQ_CAUSE = 0x1d58,
        PCI_IRQ_MASK = 0x1d5c,
        PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */

        PCIE_IRQ_CAUSE = 0x1900,
        PCIE_IRQ_MASK = 0x1910,
        PCIE_UNMASK_ALL_IRQS = 0x40a,   /* assorted bits */

        /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
        PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
        PCI_HC_MAIN_IRQ_MASK = 0x1d64,
        SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
        SOC_HC_MAIN_IRQ_MASK = 0x20024,
        ERR_IRQ = (1 << 0),     /* shift by (2 * port #) */
        DONE_IRQ = (1 << 1),    /* shift by (2 * port #) */
        HC0_IRQ_PEND = 0x1ff,   /* bits 0-8 = HC0's ports */
        HC_SHIFT = 9,           /* bits 9-17 = HC1's ports */
        DONE_IRQ_0_3 = 0x000000aa,      /* DONE_IRQ ports 0,1,2,3 */
        DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT),      /* 4,5,6,7 */
        PCI_ERR = (1 << 18),
        TRAN_COAL_LO_DONE = (1 << 19),  /* transaction coalescing */
        TRAN_COAL_HI_DONE = (1 << 20),  /* transaction coalescing */
        PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
        PORTS_4_7_COAL_DONE = (1 << 17),        /* HC1 IRQ coalescing */
        ALL_PORTS_COAL_DONE = (1 << 21),        /* GEN_II(E) IRQ coalescing */
        GPIO_INT = (1 << 22),
        SELF_INT = (1 << 23),
        TWSI_INT = (1 << 24),
        HC_MAIN_RSVD = (0x7f << 25),    /* bits 31-25 */
        HC_MAIN_RSVD_5 = (0x1fff << 19),        /* bits 31-19 */
        HC_MAIN_RSVD_SOC = (0x3fffffb << 6),    /* bits 31-9, 7-6 */

        /* SATAHC registers */
        HC_CFG = 0x00,

        HC_IRQ_CAUSE = 0x14,
        DMA_IRQ = (1 << 0),     /* shift by port # */
        HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
        DEV_IRQ = (1 << 8),     /* shift by port # */

        /*
         * Per-HC (Host-Controller) interrupt coalescing feature.
         * This is present on all chip generations.
         *
         * Coalescing defers the interrupt until either the IO_THRESHOLD
         * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
         */
        HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
        HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,

        SOC_LED_CTRL = 0x2c,
        SOC_LED_CTRL_BLINK = (1 << 0),  /* Active LED blink */
        SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),   /* Multiplex dev presence */
                                                /* with dev activity LED */

        /* Shadow block registers */
        SHD_BLK = 0x100,
        SHD_CTL_AST = 0x20,     /* ofs from SHD_BLK */

        /* SATA registers */
        SATA_STATUS = 0x300,    /* ctrl, err regs follow status */
        SATA_ACTIVE = 0x350,
        FIS_IRQ_CAUSE = 0x364,
        FIS_IRQ_CAUSE_AN = (1 << 9),    /* async notification */

        LTMODE = 0x30c,         /* requires read-after-write */
        LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */

        PHY_MODE2 = 0x330,
        PHY_MODE3 = 0x310,

        PHY_MODE4 = 0x314,      /* requires read-after-write */
        PHY_MODE4_CFG_MASK = 0x00000003,        /* phy internal config field */
        PHY_MODE4_CFG_VALUE = 0x00000001,       /* phy internal config field */
        PHY_MODE4_RSVD_ZEROS = 0x5de3fffa,      /* Gen2e always write zeros */
        PHY_MODE4_RSVD_ONES = 0x00000005,       /* Gen2e always write ones */

        SATA_IFCTL = 0x344,
        SATA_TESTCTL = 0x348,
        SATA_IFSTAT = 0x34c,
        VENDOR_UNIQUE_FIS = 0x35c,

        FISCFG = 0x360,
        FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
        FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */

        PHY_MODE9_GEN2 = 0x398,
        PHY_MODE9_GEN1 = 0x39c,
        PHYCFG_OFS = 0x3a0,     /* only in 65n devices */

        MV5_PHY_MODE = 0x74,
        MV5_LTMODE = 0x30,
        MV5_PHY_CTL = 0x0C,
        SATA_IFCFG = 0x050,

        MV_M2_PREAMP_MASK = 0x7e0,

        /* Port registers */
        EDMA_CFG = 0,
        EDMA_CFG_Q_DEPTH = 0x1f,        /* max device queue depth */
        EDMA_CFG_NCQ = (1 << 5),        /* for R/W FPDMA queued */
        EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),     /* continue on error */
        EDMA_CFG_RD_BRST_EXT = (1 << 11),       /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN = (1 << 13),       /* write buffer 512B */
        EDMA_CFG_EDMA_FBS = (1 << 16),  /* EDMA FIS-Based Switching */
        EDMA_CFG_FBS = (1 << 26),       /* FIS-Based Switching */

        EDMA_ERR_IRQ_CAUSE = 0x8,
        EDMA_ERR_IRQ_MASK = 0xc,
        EDMA_ERR_D_PAR = (1 << 0),      /* UDMA data parity err */
        EDMA_ERR_PRD_PAR = (1 << 1),    /* UDMA PRD parity err */
        EDMA_ERR_DEV = (1 << 2),        /* device error */
        EDMA_ERR_DEV_DCON = (1 << 3),   /* device disconnect */
        EDMA_ERR_DEV_CON = (1 << 4),    /* device connected */
        EDMA_ERR_SERR = (1 << 5),       /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS = (1 << 7),   /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7 = (1 << 8),        /* Gen IIE transprt layer irq */
        EDMA_ERR_CRQB_PAR = (1 << 9),   /* CRQB parity error */
        EDMA_ERR_CRPB_PAR = (1 << 10),  /* CRPB parity error */
        EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
        EDMA_ERR_IORDY = (1 << 12),     /* IORdy timeout */

        EDMA_ERR_LNK_CTRL_RX = (0xf << 13),     /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),     /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),     /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),     /* fatal: caught SYNC */
        EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),     /* transient: FIS rx err */

        EDMA_ERR_LNK_DATA_RX = (0xf << 17),     /* link data rx error */

        EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),    /* link ctrl tx error */
        EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),     /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),     /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),     /* transient: caught SYNC */
        EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),     /* transient: caught DMAT */
        EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),     /* transient: FIS collision */

        EDMA_ERR_LNK_DATA_TX = (0x1f << 26),    /* link data tx error */

        EDMA_ERR_TRANS_PROTO = (1 << 31),       /* transport protocol error */
        EDMA_ERR_OVERRUN_5 = (1 << 5),
        EDMA_ERR_UNDERRUN_5 = (1 << 6),

        EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
                                 EDMA_ERR_LNK_CTRL_RX_1 |
                                 EDMA_ERR_LNK_CTRL_RX_3 |
                                 EDMA_ERR_LNK_CTRL_TX,

        EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
                         EDMA_ERR_PRD_PAR |
                         EDMA_ERR_DEV_DCON |
                         EDMA_ERR_DEV_CON |
                         EDMA_ERR_SERR |
                         EDMA_ERR_SELF_DIS |
                         EDMA_ERR_CRQB_PAR |
                         EDMA_ERR_CRPB_PAR |
                         EDMA_ERR_INTRL_PAR |
                         EDMA_ERR_IORDY |
                         EDMA_ERR_LNK_CTRL_RX_2 |
                         EDMA_ERR_LNK_DATA_RX |
                         EDMA_ERR_LNK_DATA_TX |
                         EDMA_ERR_TRANS_PROTO,

        EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
                           EDMA_ERR_PRD_PAR |
                           EDMA_ERR_DEV_DCON |
                           EDMA_ERR_DEV_CON |
                           EDMA_ERR_OVERRUN_5 |
                           EDMA_ERR_UNDERRUN_5 |
                           EDMA_ERR_SELF_DIS_5 |
                           EDMA_ERR_CRQB_PAR |
                           EDMA_ERR_CRPB_PAR |
                           EDMA_ERR_INTRL_PAR |
                           EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI = 0x10,
        EDMA_REQ_Q_IN_PTR = 0x14,       /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR = 0x18,
        EDMA_REQ_Q_PTR_SHIFT = 5,

        EDMA_RSP_Q_BASE_HI = 0x1c,
        EDMA_RSP_Q_IN_PTR = 0x20,
        EDMA_RSP_Q_OUT_PTR = 0x24,      /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT = 3,

        EDMA_CMD = 0x28,        /* EDMA command register */
        EDMA_EN = (1 << 0),     /* enable EDMA */
        EDMA_DS = (1 << 1),     /* disable EDMA; self-negated */
        EDMA_RESET = (1 << 2),  /* reset eng/trans/link/phy */

        EDMA_STATUS = 0x30,     /* EDMA engine status */
        EDMA_STATUS_CACHE_EMPTY = (1 << 6),     /* GenIIe command cache empty */
        EDMA_STATUS_IDLE = (1 << 7),    /* GenIIe EDMA enabled/idle */

        EDMA_IORDY_TMOUT = 0x34,
        EDMA_ARB_CFG = 0x38,

        EDMA_HALTCOND = 0x60,   /* GenIIe halt conditions */
        EDMA_UNKNOWN_RSVD = 0x6C,       /* GenIIe unknown/reserved */

        BMDMA_CMD = 0x224,      /* bmdma command register */
        BMDMA_STATUS = 0x228,   /* bmdma status register */
        BMDMA_PRD_LOW = 0x22c,  /* bmdma PRD addr 31:0 */
        BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI = (1 << 0),
        MV_HP_ERRATA_50XXB0 = (1 << 1),
        MV_HP_ERRATA_50XXB2 = (1 << 2),
        MV_HP_ERRATA_60X1B2 = (1 << 3),
        MV_HP_ERRATA_60X1C0 = (1 << 4),
        MV_HP_GEN_I = (1 << 6),         /* Generation I: 50xx */
        MV_HP_GEN_II = (1 << 7),        /* Generation II: 60xx */
        MV_HP_GEN_IIE = (1 << 8),       /* Generation IIE: 6042/7042 */
        MV_HP_PCIE = (1 << 9),          /* PCIe bus/regs: 7042 */
        MV_HP_CUT_THROUGH = (1 << 10),  /* can use EDMA cut-through */
        MV_HP_FLAG_SOC = (1 << 11),     /* SystemOnChip, no PCI */
        MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),   /* is led blinking enabled? */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN = (1 << 0),  /* is EDMA engine enabled? */
        MV_PP_FLAG_NCQ_EN = (1 << 1),   /* is EDMA set up for NCQ? */
        MV_PP_FLAG_FBS_EN = (1 << 2),   /* is EDMA set up for FBS? */
        MV_PP_FLAG_DELAYED_EH = (1 << 3),       /* delayed dev err handling */
        MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),    /* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv)         ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)        ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)       ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv)          ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv)           ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)          (0x20030 + ((i) << 4))
#define WINDOW_BASE(i)          (0x20034 + ((i) << 4))

enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
        chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32 sg_addr;
        __le32 sg_addr_hi;
        __le16 ctrl_flags;
        __le16 ata_cmd[11];
};

struct mv_crqb_iie {
        __le32 addr;
        __le32 addr_hi;
        __le32 flags;
        __le32 len;
        __le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16 id;
        __le16 flags;
        __le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32 addr;
        __le32 flags_size;
        __le32 addr_hi;
        __le32 reserved;
};
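
/*
 * Size cross-check (illustrative arithmetic, not in the original source):
 * with MV_MAX_Q_DEPTH == 32, a CRQB ring is 32 entries * 32B == 1KB
 * (MV_CRQB_Q_SZ, matching the 1KB alignment rule above), a CRPB ring is
 * 32 * 8B == 256B (MV_CRPB_Q_SZ), and an ePRD table is 256 entries * 16B
 * == 4KB (MV_SG_TBL_SZ).
 */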

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
        u32 fiscfg;
        u32 ltmode;
        u32 haltcond;
        u32 unknown_rsvd;
};

struct mv_port_priv {
        struct mv_crqb *crqb;
        dma_addr_t crqb_dma;
        struct mv_crpb *crpb;
        dma_addr_t crpb_dma;
        struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
        dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

        unsigned int req_idx;
        unsigned int resp_idx;

        u32 pp_flags;
        struct mv_cached_regs cached;
        unsigned int delayed_eh_pmp_map;
};

struct mv_port_signal {
        u32 amps;
        u32 pre;
};

struct mv_host_priv {
        u32 hp_flags;
        u32 main_irq_mask;
        struct mv_port_signal signal[8];
        const struct mv_hw_ops *ops;
        int n_ports;
        void __iomem *base;
        void __iomem *main_irq_cause_addr;
        void __iomem *main_irq_mask_addr;
        u32 irq_cause_offset;
        u32 irq_mask_offset;
        u32 unmask_all_irqs;
        /*
         * These consistent DMA memory pools give us guaranteed
         * alignment for hardware-accessed data structures,
         * and less memory waste in accomplishing the alignment.
         */
        struct dma_pool *crqb_pool;
        struct dma_pool *crpb_pool;
        struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
                               void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
                               void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
                           void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
                               void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
                                  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
                            unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
                                    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize = MV_MAX_SG_CT / 2,
        .dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue = MV_MAX_Q_DEPTH - 1,
        .sg_tablesize = MV_MAX_SG_CT / 2,
        .dma_boundary = MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
        .inherits = &ata_sff_port_ops,

        .lost_interrupt = ATA_OP_NULL,

        .qc_defer = mv_qc_defer,
        .qc_prep = mv_qc_prep,
        .qc_issue = mv_qc_issue,

        .freeze = mv_eh_freeze,
        .thaw = mv_eh_thaw,
        .hardreset = mv_hardreset,
        .error_handler = ata_std_error_handler, /* avoid SFF EH */
        .post_internal_cmd = ATA_OP_NULL,

        .scr_read = mv5_scr_read,
        .scr_write = mv5_scr_write,

        .port_start = mv_port_start,
        .port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
        .inherits = &mv5_ops,
        .dev_config = mv6_dev_config,
        .scr_read = mv_scr_read,
        .scr_write = mv_scr_write,

        .pmp_hardreset = mv_pmp_hardreset,
        .pmp_softreset = mv_softreset,
        .softreset = mv_softreset,
        .error_handler = mv_pmp_error_handler,

        .sff_check_status = mv_sff_check_status,
        .sff_irq_clear = mv_sff_irq_clear,
        .check_atapi_dma = mv_check_atapi_dma,
        .bmdma_setup = mv_bmdma_setup,
        .bmdma_start = mv_bmdma_start,
        .bmdma_stop = mv_bmdma_stop,
        .bmdma_status = mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
        .inherits = &mv6_ops,
        .dev_config = ATA_OP_NULL,
        .qc_prep = mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
        { /* chip_504x */
                .flags = MV_GEN_I_FLAGS,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        { /* chip_508x */
                .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        { /* chip_5080 */
                .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        { /* chip_604x */
                .flags = MV_GEN_II_FLAGS,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv6_ops,
        },
        { /* chip_608x */
                .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv6_ops,
        },
        { /* chip_6042 */
                .flags = MV_GEN_IIE_FLAGS,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
        { /* chip_7042 */
                .flags = MV_GEN_IIE_FLAGS,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
        { /* chip_soc */
                .flags = MV_GEN_IIE_FLAGS,
                .pio_mask = ATA_PIO4,
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1720/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
        { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
        { PCI_VDEVICE(TTI, 0x1742), chip_6042 },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        /* Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        /* Highpoint RocketRAID PCIe series */
        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        { }                     /* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata = mv5_phy_errata,
        .enable_leds = mv5_enable_leds,
        .read_preamp = mv5_read_preamp,
        .reset_hc = mv5_reset_hc,
        .reset_flash = mv5_reset_flash,
        .reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata = mv6_phy_errata,
        .enable_leds = mv6_enable_leds,
        .read_preamp = mv6_read_preamp,
        .reset_hc = mv6_reset_hc,
        .reset_flash = mv6_reset_flash,
        .reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
        .phy_errata = mv6_phy_errata,
        .enable_leds = mv_soc_enable_leds,
        .read_preamp = mv_soc_read_preamp,
        .reset_hc = mv_soc_reset_hc,
        .reset_flash = mv_soc_reset_flash,
        .reset_bus = mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
        .phy_errata = mv_soc_65n_phy_errata,
        .enable_leds = mv_soc_enable_leds,
        .reset_hc = mv_soc_reset_hc,
        .reset_flash = mv_soc_reset_flash,
        .reset_bus = mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)    \
{                                                               \
        shift    = mv_hc_from_port(port) * HC_SHIFT;            \
        hardport = mv_hardport_from_port(port);                 \
        shift   += hardport * 2;                                \
}
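
/*
 * Worked example (illustrative, not in the original source): for port 6,
 * mv_hc_from_port(6) == 1, so shift starts at HC_SHIFT (9); hardport ==
 * (6 & MV_PORT_MASK) == 2 adds 2 * 2, giving shift == 13.  Thus
 * (DONE_IRQ | ERR_IRQ) << 13 selects port 6's bits in the
 * main_irq_cause/main_irq_mask registers.
 */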

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return  mv_hc_base_from_port(base, port) +
                MV_SATAHC_ARBTR_REG_SZ +
                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

        return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
        struct mv_host_priv *hpriv = host->private_data;
        return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 * mv_save_cached_regs - (re-)initialize cached port registers
 * @ap: the port whose registers we are caching
 *
 * Initialize the local cache of port registers,
 * so that reading them over and over again can
 * be avoided on the hotter paths of this driver.
 * This saves a few microseconds each time we switch
 * to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;

        pp->cached.fiscfg = readl(port_mmio + FISCFG);
        pp->cached.ltmode = readl(port_mmio + LTMODE);
        pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
        pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 * mv_write_cached_reg - write to a cached port register
 * @addr: hardware address of the register
 * @old: pointer to cached value of the register
 * @new: new value for the register
 *
 * Write a new value to a cached register,
 * but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
        if (new != *old) {
                unsigned long laddr;
                *old = new;
                /*
                 * Workaround for 88SX60x1-B2 FEr SATA#13:
                 * Read-after-write is needed to prevent generating 64-bit
                 * write cycles on the PCI bus for SATA interface registers
                 * at offsets ending in 0x4 or 0xc.
                 *
                 * Looks like a lot of fuss, but it avoids an unnecessary
                 * +1 usec read-after-write delay for unaffected registers.
                 */
                laddr = (long)addr & 0xffff;
                if (laddr >= 0x300 && laddr <= 0x33c) {
                        laddr &= 0x000f;
                        if (laddr == 0x4 || laddr == 0xc) {
                                writelfl(new, addr); /* read after write */
                                return;
                        }
                }
                writel(new, addr);      /* unaffected by the errata */
        }
}
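
/*
 * Illustrative note (derived from the constants above, not original text):
 * of the registers cached in mv_cached_regs, only LTMODE (0x30c) falls in
 * the 0x300-0x33c window with a low nibble of 0x4/0xc, so it takes the
 * writelfl() path; FISCFG (0x360), EDMA_HALTCOND (0x60) and
 * EDMA_UNKNOWN_RSVD (0x6C) are written with a plain writel().
 */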

static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        pp->req_idx &= MV_MAX_Q_DEPTH_MASK;     /* paranoia */
        index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR);
        writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

        /*
         * initialize response queue
         */
        pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;    /* paranoia */
        index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
        writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR);
}
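
/*
 * Note (inferred from the constants above, not stated in the original):
 * EDMA_REQ_Q_PTR_SHIFT is 5 presumably because each CRQB is 32 bytes, so
 * "req_idx << 5" is the byte offset of the ring slot; likewise
 * EDMA_RSP_Q_PTR_SHIFT is 3 for the 8-byte CRPBs.  The IN/OUT pointer
 * registers hold BASE_LO and this offset together, hence the OR with
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK).
 */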

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
        /*
         * When writing to the main_irq_mask in hardware,
         * we must ensure exclusivity between the interrupt coalescing bits
         * and the corresponding individual port DONE_IRQ bits.
         *
         * Note that this register is really an "IRQ enable" register,
         * not an "IRQ mask" register as Marvell's naming might suggest.
         */
        if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
                mask &= ~DONE_IRQ_0_3;
        if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
                mask &= ~DONE_IRQ_4_7;
        writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
                                 u32 disable_bits, u32 enable_bits)
{
        struct mv_host_priv *hpriv = host->private_data;
        u32 old_mask, new_mask;

        old_mask = hpriv->main_irq_mask;
        new_mask = (old_mask & ~disable_bits) | enable_bits;
        if (new_mask != old_mask) {
                hpriv->main_irq_mask = new_mask;
                mv_write_main_irq_mask(new_mask, hpriv);
        }
}

static void mv_enable_port_irqs(struct ata_port *ap,
                                unsigned int port_bits)
{
        unsigned int shift, hardport, port = ap->port_no;
        u32 disable_bits, enable_bits;

        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

        disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
        enable_bits = port_bits << shift;
        mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
                                          void __iomem *port_mmio,
                                          unsigned int port_irqs)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        int hardport = mv_hardport_from_port(ap->port_no);
        void __iomem *hc_mmio = mv_hc_base_from_port(
                                mv_host_base(ap->host), ap->port_no);
        u32 hc_irq_cause;

        /* clear EDMA event indicators, if any */
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

        /* clear pending irq events */
        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

        /* clear FIS IRQ Cause */
        if (IS_GEN_IIE(hpriv))
                writelfl(0, port_mmio + FIS_IRQ_CAUSE);

        mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
                                  unsigned int count, unsigned int usecs)
{
        struct mv_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->base, *hc_mmio;
        u32 coal_enable = 0;
        unsigned long flags;
        unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
        const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                 ALL_PORTS_COAL_DONE;

        /* Disable IRQ coalescing if either threshold is zero */
        if (!usecs || !count) {
                clks = count = 0;
        } else {
                /* Respect maximum limits of the hardware */
                clks = usecs * COAL_CLOCKS_PER_USEC;
                if (clks > MAX_COAL_TIME_THRESHOLD)
                        clks = MAX_COAL_TIME_THRESHOLD;
                if (count > MAX_COAL_IO_COUNT)
                        count = MAX_COAL_IO_COUNT;
        }

        spin_lock_irqsave(&host->lock, flags);
        mv_set_main_irq_mask(host, coal_disable, 0);

        if (is_dual_hc && !IS_GEN_I(hpriv)) {
                /*
                 * GEN_II/GEN_IIE with dual host controllers:
                 * one set of global thresholds for the entire chip.
                 */
                writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
                writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
                /* clear leftover coal IRQ bit */
                writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
                if (count)
                        coal_enable = ALL_PORTS_COAL_DONE;
                clks = count = 0; /* force clearing of regular regs below */
        }

        /*
         * All chips: independent thresholds for each HC on the chip.
         */
        hc_mmio = mv_hc_base_from_port(mmio, 0);
        writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
        writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
        writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
        if (count)
                coal_enable |= PORTS_0_3_COAL_DONE;
        if (is_dual_hc) {
                hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
                writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
                writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
                writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
                if (count)
                        coal_enable |= PORTS_4_7_COAL_DONE;
        }

        mv_set_main_irq_mask(host, 0, coal_enable);
        spin_unlock_irqrestore(&host->lock, flags);
}
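
/*
 * Worked example (illustrative arithmetic, not in the original source):
 * irq_coalescing_usecs=100 and irq_coalescing_io_count=4 give
 * clks = 100 * COAL_CLOCKS_PER_USEC == 15000 internal clocks (well under
 * MAX_COAL_TIME_THRESHOLD), so the chip raises one interrupt after 4
 * completed I/Os or ~100 usecs, whichever occurs first.
 */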

/**
 * mv_start_edma - Enable eDMA engine
 * @ap: port on which to enable eDMA
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
                          struct mv_port_priv *pp, u8 protocol)
{
        int want_ncq = (protocol == ATA_PROT_NCQ);

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
                if (want_ncq != using_ncq)
                        mv_stop_edma(ap);
        }
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                struct mv_host_priv *hpriv = ap->host->private_data;

                mv_edma_cfg(ap, want_ncq, 1);

                mv_set_edma_ptrs(port_mmio, hpriv, pp);
                mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

                writelfl(EDMA_EN, port_mmio + EDMA_CMD);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
        const int per_loop = 5, timeout = (15 * 1000 / per_loop);
        int i;

        /*
         * Wait for the EDMA engine to finish transactions in progress.
         * No idea what a good "timeout" value might be, but measurements
         * indicate that it often requires hundreds of microseconds
         * with two drives in-use.  So we use the 15msec value above
         * as a rough guess at what even more drives might require.
         */
        for (i = 0; i < timeout; ++i) {
                u32 edma_stat = readl(port_mmio + EDMA_STATUS);
                if ((edma_stat & empty_idle) == empty_idle)
                        break;
                udelay(per_loop);
        }
        /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
        int i;

        /* Disable eDMA.  The disable bit auto clears. */
        writelfl(EDMA_DS, port_mmio + EDMA_CMD);

        /* Wait for the chip to confirm eDMA is off. */
        for (i = 10000; i > 0; i--) {
                u32 reg = readl(port_mmio + EDMA_CMD);
                if (!(reg & EDMA_EN))
                        return 0;
                udelay(10);
        }
        return -EIO;
}
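
/*
 * Timing note (arithmetic only, not in the original source): the polling
 * loop above allows up to 10000 iterations of udelay(10), i.e. roughly
 * 100 msec for EDMA_EN to clear before giving up with -EIO.
 */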

static int mv_stop_edma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        int err = 0;

        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
                return 0;
        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        mv_wait_for_edma_empty_idle(ap);
        if (mv_stop_edma_engine(port_mmio)) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                err = -EIO;
        }
        mv_edma_cfg(ap, 0, 0);
        return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* should be benign for 4-port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE;   /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}
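
/*
 * Example mapping (assuming libata's usual SCR numbering, SCR_STATUS=0,
 * SCR_ERROR=1, SCR_CONTROL=2 -- an assumption, not from this file):
 * SStatus lands at 0x300, SError at 0x304, SControl at 0x308, while
 * SCR_ACTIVE is special-cased to SATA_ACTIVE (0x350).
 */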

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                *val = readl(mv_ap_base(link->ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                void __iomem *addr = mv_ap_base(link->ap) + ofs;
                if (sc_reg_in == SCR_CONTROL) {
                        /*
                         * Workaround for 88SX60x1 FEr SATA#26:
                         *
                         * COMRESETs have to take care not to accidentally
                         * put the drive to sleep when writing SCR_CONTROL.
                         * Setting bits 12..15 prevents this problem.
                         *
                         * So if we see an outbound COMRESET, set those bits.
                         * Ditto for the follow-up write that clears the reset.
                         *
                         * The proprietary driver does this for
                         * all chip versions, and so do we.
                         */
                        if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
                                val |= 0xf000;
                }
                writelfl(val, addr);
                return 0;
        } else
                return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
        /*
         * Deal with Gen-II ("mv6") hardware quirks/restrictions:
         *
         * Gen-II does not support NCQ over a port multiplier
         * (no FIS-based switching).
         */
        if (adev->flags & ATA_DFLAG_NCQ) {
                if (sata_pmp_attached(adev->link->ap)) {
                        adev->flags &= ~ATA_DFLAG_NCQ;
                        ata_dev_printk(adev, KERN_INFO,
                                "NCQ disabled for command-based switching\n");
                }
        }
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
        struct ata_link *link = qc->dev->link;
        struct ata_port *ap = link->ap;
        struct mv_port_priv *pp = ap->private_data;

        /*
         * Don't allow new commands if we're in a delayed EH state
         * for NCQ and/or FIS-based switching.
         */
        if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
                return ATA_DEFER_PORT;

        /*
         * PIO commands need an exclusive link: no other commands
         * (DMA or PIO) can run concurrently.
         * Set excl_link when we want to send a PIO command in DMA mode
         * or a non-NCQ command in NCQ mode.
         * When we receive a command from that link, and there are no
         * outstanding commands, mark a flag to clear excl_link and let
         * the command go through.
         */
        if (unlikely(ap->excl_link)) {
                if (link == ap->excl_link) {
                        if (ap->nr_active_links)
                                return ATA_DEFER_PORT;
                        qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
                        return 0;
                } else
                        return ATA_DEFER_PORT;
        }

        /*
         * If the port is completely idle, then allow the new qc.
         */
        if (ap->nr_active_links == 0)
                return 0;

        /*
         * The port is operating in host queuing mode (EDMA) with NCQ
         * enabled, allow multiple NCQ commands.  EDMA also allows
         * queueing multiple DMA commands but libata core currently
         * doesn't allow it.
         */
        if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
            (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
                if (ata_is_ncq(qc->tf.protocol))
                        return 0;
                else {
                        ap->excl_link = link;
                        return ATA_DEFER_PORT;
                }
        }

        return ATA_DEFER_PORT;
}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
        struct mv_port_priv *pp = ap->private_data;
        void __iomem *port_mmio;

        u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
        u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
        u32 haltcond, *old_haltcond = &pp->cached.haltcond;

        ltmode = *old_ltmode & ~LTMODE_BIT8;
        haltcond = *old_haltcond | EDMA_ERR_DEV;

        if (want_fbs) {
                fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
                ltmode = *old_ltmode | LTMODE_BIT8;
                if (want_ncq)
                        haltcond &= ~EDMA_ERR_DEV;
                else
                        fiscfg |= FISCFG_WAIT_DEV_ERR;
        } else {
                fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
        }

        port_mmio = mv_ap_base(ap);
        mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
        mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
        mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        u32 old, new;

        /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
        old = readl(hpriv->base + GPIO_PORT_CTL);
        if (want_ncq)
                new = old | (1 << 22);
        else
                new = old & ~(1 << 22);
        if (new != old)
                writel(new, hpriv->base + GPIO_PORT_CTL);
}

/**
 * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1475 * @ap: Port being initialized
c01e8a23
ML
1476 *
1477 * There are two DMA modes on these chips: basic DMA, and EDMA.
1478 *
1479 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1480 * of basic DMA on the GEN_IIE versions of the chips.
1481 *
1482 * This bit survives EDMA resets, and must be set for basic DMA
1483 * to function, and should be cleared when EDMA is active.
1484 */
1485static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1486{
1487 struct mv_port_priv *pp = ap->private_data;
1488 u32 new, *old = &pp->cached.unknown_rsvd;
1489
1490 if (enable_bmdma)
1491 new = *old | 1;
1492 else
1493 new = *old & ~1;
cae5a29d 1494 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
c01e8a23
ML
1495}
1496
000b344f
ML
1497/*
1498 * SOC chips have an issue whereby the HDD LEDs don't always blink
1499 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1500 * of the SOC takes care of it, generating a steady blink rate when
1501 * any drive on the chip is active.
1502 *
1503 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1504 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1505 *
1506 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1507 * LED operation works then, and provides better (more accurate) feedback.
1508 *
1509 * Note that this code assumes that an SOC never has more than one HC onboard.
1510 */
1511static void mv_soc_led_blink_enable(struct ata_port *ap)
1512{
1513 struct ata_host *host = ap->host;
1514 struct mv_host_priv *hpriv = host->private_data;
1515 void __iomem *hc_mmio;
1516 u32 led_ctrl;
1517
1518 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1519 return;
1520 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1521 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
cae5a29d
ML
1522 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1523 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
000b344f
ML
1524}
1525
1526static void mv_soc_led_blink_disable(struct ata_port *ap)
1527{
1528 struct ata_host *host = ap->host;
1529 struct mv_host_priv *hpriv = host->private_data;
1530 void __iomem *hc_mmio;
1531 u32 led_ctrl;
1532 unsigned int port;
1533
1534 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1535 return;
1536
1537 /* disable led-blink only if no ports are using NCQ */
1538 for (port = 0; port < hpriv->n_ports; port++) {
1539 struct ata_port *this_ap = host->ports[port];
1540 struct mv_port_priv *pp = this_ap->private_data;
1541
1542 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1543 return;
1544 }
1545
1546 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1547 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
cae5a29d
ML
1548 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1549 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
000b344f
ML
1550}
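/*
 * Policy example: on a 4-port SOC where only port 2 runs NCQ, the first
 * NCQ configuration of port 2 sets the global blink bit; subsequent
 * non-NCQ reconfigurations of ports 0/1/3 reach the scan above but bail
 * out as long as port 2 still has MV_PP_FLAG_NCQ_EN set.
 */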
1551
00b81235 1552static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
e4e7b892 1553{
0c58912e 1554 u32 cfg;
e12bef50
ML
1555 struct mv_port_priv *pp = ap->private_data;
1556 struct mv_host_priv *hpriv = ap->host->private_data;
1557 void __iomem *port_mmio = mv_ap_base(ap);
e4e7b892
JG
1558
1559 /* set up non-NCQ EDMA configuration */
0c58912e 1560 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
d16ab3f6
ML
1561 pp->pp_flags &=
1562 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
e4e7b892 1563
0c58912e 1564 if (IS_GEN_I(hpriv))
e4e7b892
JG
1565 cfg |= (1 << 8); /* enab config burst size mask */
1566
dd2890f6 1567 else if (IS_GEN_II(hpriv)) {
e4e7b892 1568 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
dd2890f6 1569 mv_60x1_errata_sata25(ap, want_ncq);
e4e7b892 1570
dd2890f6 1571 } else if (IS_GEN_IIE(hpriv)) {
00f42eab
ML
1572 int want_fbs = sata_pmp_attached(ap);
1573 /*
1574 * Possible future enhancement:
1575 *
1576 * The chip can use FBS with non-NCQ, if we allow it,
1577 * but first we need to have the error handling in place
1578 * for this mode (datasheet section 7.3.15.4.2.3).
1579 * So disallow non-NCQ FBS for now.
1580 */
1581 want_fbs &= want_ncq;
1582
08da1759 1583 mv_config_fbs(ap, want_ncq, want_fbs);
00f42eab
ML
1584
1585 if (want_fbs) {
1586 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1587 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1588 }
1589
e728eabe 1590 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
00b81235
ML
1591 if (want_edma) {
1592 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1593 if (!IS_SOC(hpriv))
1594 cfg |= (1 << 18); /* enab early completion */
1595 }
616d4a98
ML
1596 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1597 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
c01e8a23 1598 mv_bmdma_enable_iie(ap, !want_edma);
000b344f
ML
1599
1600 if (IS_SOC(hpriv)) {
1601 if (want_ncq)
1602 mv_soc_led_blink_enable(ap);
1603 else
1604 mv_soc_led_blink_disable(ap);
1605 }
e4e7b892
JG
1606 }
1607
72109168
ML
1608 if (want_ncq) {
1609 cfg |= EDMA_CFG_NCQ;
1610 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
00b81235 1611 }
72109168 1612
cae5a29d 1613 writelfl(cfg, port_mmio + EDMA_CFG);
e4e7b892
JG
1614}
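/*
 * Illustrative summary (symbolic values; see the EDMA_CFG_* definitions
 * earlier in this file): a non-SOC GEN_IIE port behind a port multiplier,
 * with NCQ and EDMA wanted, ends up with roughly
 *
 *	cfg = EDMA_CFG_Q_DEPTH | EDMA_CFG_EDMA_FBS | (1 << 23)
 *	    | (1 << 22) | (1 << 18) | EDMA_CFG_NCQ;
 *
 * whereas a plain PIO setup (want_ncq == want_edma == 0) keeps only
 * EDMA_CFG_Q_DEPTH plus the generation-specific bits.
 */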
1615
da2fa9ba
ML
1616static void mv_port_free_dma_mem(struct ata_port *ap)
1617{
1618 struct mv_host_priv *hpriv = ap->host->private_data;
1619 struct mv_port_priv *pp = ap->private_data;
eb73d558 1620 int tag;
da2fa9ba
ML
1621
1622 if (pp->crqb) {
1623 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1624 pp->crqb = NULL;
1625 }
1626 if (pp->crpb) {
1627 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1628 pp->crpb = NULL;
1629 }
eb73d558
ML
1630 /*
1631 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1632 * For later hardware, we have one unique sg_tbl per NCQ tag.
1633 */
1634 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1635 if (pp->sg_tbl[tag]) {
1636 if (tag == 0 || !IS_GEN_I(hpriv))
1637 dma_pool_free(hpriv->sg_tbl_pool,
1638 pp->sg_tbl[tag],
1639 pp->sg_tbl_dma[tag]);
1640 pp->sg_tbl[tag] = NULL;
1641 }
da2fa9ba
ML
1642 }
1643}
1644
05b308e1
BR
1645/**
1646 * mv_port_start - Port specific init/start routine.
1647 * @ap: ATA channel to manipulate
1648 *
1649 * Allocate and point to DMA memory, init port private memory,
1650 * zero indices.
1651 *
1652 * LOCKING:
1653 * Inherited from caller.
1654 */
31961943
BR
1655static int mv_port_start(struct ata_port *ap)
1656{
cca3974e
JG
1657 struct device *dev = ap->host->dev;
1658 struct mv_host_priv *hpriv = ap->host->private_data;
31961943 1659 struct mv_port_priv *pp;
933cb8e5 1660 unsigned long flags;
dde20207 1661 int tag;
31961943 1662
24dc5f33 1663 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
6037d6bb 1664 if (!pp)
24dc5f33 1665 return -ENOMEM;
da2fa9ba 1666 ap->private_data = pp;
31961943 1667
da2fa9ba
ML
1668 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1669 if (!pp->crqb)
1670 return -ENOMEM;
1671 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
31961943 1672
da2fa9ba
ML
1673 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1674 if (!pp->crpb)
1675 goto out_port_free_dma_mem;
1676 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
31961943 1677
3bd0a70e
ML
1678 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1679 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1680 ap->flags |= ATA_FLAG_AN;
eb73d558
ML
1681 /*
1682 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1683 * For later hardware, we need one unique sg_tbl per NCQ tag.
1684 */
1685 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1686 if (tag == 0 || !IS_GEN_I(hpriv)) {
1687 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1688 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1689 if (!pp->sg_tbl[tag])
1690 goto out_port_free_dma_mem;
1691 } else {
1692 pp->sg_tbl[tag] = pp->sg_tbl[0];
1693 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1694 }
1695 }
933cb8e5
ML
1696
1697 spin_lock_irqsave(ap->lock, flags);
08da1759 1698 mv_save_cached_regs(ap);
66e57a2c 1699 mv_edma_cfg(ap, 0, 0);
933cb8e5
ML
1700 spin_unlock_irqrestore(ap->lock, flags);
1701
31961943 1702 return 0;
da2fa9ba
ML
1703
1704out_port_free_dma_mem:
1705 mv_port_free_dma_mem(ap);
1706 return -ENOMEM;
31961943
BR
1707}
1708
05b308e1
BR
1709/**
1710 * mv_port_stop - Port specific cleanup/stop routine.
1711 * @ap: ATA channel to manipulate
1712 *
1713 * Stop DMA, cleanup port memory.
1714 *
1715 * LOCKING:
cca3974e 1716 * This routine uses the host lock to protect the DMA stop.
05b308e1 1717 */
31961943
BR
1718static void mv_port_stop(struct ata_port *ap)
1719{
933cb8e5
ML
1720 unsigned long flags;
1721
1722 spin_lock_irqsave(ap->lock, flags);
e12bef50 1723 mv_stop_edma(ap);
88e675e1 1724 mv_enable_port_irqs(ap, 0);
933cb8e5 1725 spin_unlock_irqrestore(ap->lock, flags);
da2fa9ba 1726 mv_port_free_dma_mem(ap);
31961943
BR
1727}
1728
05b308e1
BR
1729/**
1730 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1731 * @qc: queued command whose SG list to source from
1732 *
1733 * Populate the SG list and mark the last entry.
1734 *
1735 * LOCKING:
1736 * Inherited from caller.
1737 */
6c08772e 1738static void mv_fill_sg(struct ata_queued_cmd *qc)
31961943
BR
1739{
1740 struct mv_port_priv *pp = qc->ap->private_data;
972c26bd 1741 struct scatterlist *sg;
3be6cbd7 1742 struct mv_sg *mv_sg, *last_sg = NULL;
ff2aeb1e 1743 unsigned int si;
31961943 1744
eb73d558 1745 mv_sg = pp->sg_tbl[qc->tag];
ff2aeb1e 1746 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d88184fb
JG
1747 dma_addr_t addr = sg_dma_address(sg);
1748 u32 sg_len = sg_dma_len(sg);
22374677 1749
4007b493
OJ
1750 while (sg_len) {
1751 u32 offset = addr & 0xffff;
1752 u32 len = sg_len;
22374677 1753
32cd11a6 1754 if (offset + len > 0x10000)
4007b493
OJ
1755 len = 0x10000 - offset;
1756
1757 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1758 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
6c08772e 1759 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
32cd11a6 1760 mv_sg->reserved = 0;
4007b493
OJ
1761
1762 sg_len -= len;
1763 addr += len;
1764
3be6cbd7 1765 last_sg = mv_sg;
4007b493 1766 mv_sg++;
4007b493 1767 }
31961943 1768 }
3be6cbd7
JG
1769
1770 if (likely(last_sg))
1771 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
32cd11a6 1772 mb(); /* ensure data structure is visible to the chipset */
31961943
BR
1773}
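/*
 * Worked example of the 64KiB-boundary split above, with hypothetical
 * values: one S/G segment at dma address 0x1f800, length 0x12000,
 * becomes three ePRDs:
 *
 *	0x1f800 len 0x0800	(up to the 64KiB boundary)
 *	0x20000 len 0x10000	(stored as 0x0000, which presumably
 *				 encodes 64KiB, as in classic PRD tables)
 *	0x30000 len 0x1800	(flagged EPRD_FLAG_END_OF_TBL)
 */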
1774
5796d1c4 1775static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
31961943 1776{
559eedad 1777 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
31961943 1778 (last ? CRQB_CMD_LAST : 0);
559eedad 1779 *cmdw = cpu_to_le16(tmp);
31961943
BR
1780}
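/*
 * Each call to this helper emits one 16-bit CRQB word: the register
 * value in the low byte, the ATA register address at
 * CRQB_CMD_ADDR_SHIFT, plus CRQB_CMD_CS and, on the final word only,
 * CRQB_CMD_LAST.  The exact shift/flag values are in the CRQB_CMD_*
 * definitions earlier in this file.
 */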
1781
da14265e
ML
1782/**
1783 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1784 * @ap: Port associated with this ATA transaction.
1785 *
1786 * We need this only for ATAPI bmdma transactions,
1787 * as otherwise we experience spurious interrupts
1788 * after libata-sff handles the bmdma interrupts.
1789 */
1790static void mv_sff_irq_clear(struct ata_port *ap)
1791{
1792 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1793}
1794
1795/**
1796 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1797 * @qc: queued command to check for chipset/DMA compatibility.
1798 *
1799 * The bmdma engines cannot handle speculative data sizes
1800 * (bytecount under/over flow). So only allow DMA for
1801 * data transfer commands with known data sizes.
1802 *
1803 * LOCKING:
1804 * Inherited from caller.
1805 */
1806static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1807{
1808 struct scsi_cmnd *scmd = qc->scsicmd;
1809
1810 if (scmd) {
1811 switch (scmd->cmnd[0]) {
1812 case READ_6:
1813 case READ_10:
1814 case READ_12:
1815 case WRITE_6:
1816 case WRITE_10:
1817 case WRITE_12:
1818 case GPCMD_READ_CD:
1819 case GPCMD_SEND_DVD_STRUCTURE:
1820 case GPCMD_SEND_CUE_SHEET:
1821 return 0; /* DMA is safe */
1822 }
1823 }
1824 return -EOPNOTSUPP; /* use PIO instead */
1825}
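/*
 * A nonzero return from this hook tells libata's ATAPI path (see
 * atapi_check_dma()) that DMA is not usable for the command, so
 * anything outside the whitelist above is transferred via PIO.
 */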
1826
1827/**
1828 * mv_bmdma_setup - Set up BMDMA transaction
1829 * @qc: queued command to prepare DMA for.
1830 *
1831 * LOCKING:
1832 * Inherited from caller.
1833 */
1834static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1835{
1836 struct ata_port *ap = qc->ap;
1837 void __iomem *port_mmio = mv_ap_base(ap);
1838 struct mv_port_priv *pp = ap->private_data;
1839
1840 mv_fill_sg(qc);
1841
1842 /* clear all DMA cmd bits */
cae5a29d 1843 writel(0, port_mmio + BMDMA_CMD);
da14265e
ML
1844
1845 /* load PRD table addr. */
1846 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
cae5a29d 1847 port_mmio + BMDMA_PRD_HIGH);
da14265e 1848 writelfl(pp->sg_tbl_dma[qc->tag],
cae5a29d 1849 port_mmio + BMDMA_PRD_LOW);
da14265e
ML
1850
1851 /* issue r/w command */
1852 ap->ops->sff_exec_command(ap, &qc->tf);
1853}
1854
1855/**
1856 * mv_bmdma_start - Start a BMDMA transaction
1857 * @qc: queued command to start DMA on.
1858 *
1859 * LOCKING:
1860 * Inherited from caller.
1861 */
1862static void mv_bmdma_start(struct ata_queued_cmd *qc)
1863{
1864 struct ata_port *ap = qc->ap;
1865 void __iomem *port_mmio = mv_ap_base(ap);
1866 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1867 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1868
1869 /* start host DMA transaction */
cae5a29d 1870 writelfl(cmd, port_mmio + BMDMA_CMD);
da14265e
ML
1871}
1872
1873/**
1874 * mv_bmdma_stop - Stop BMDMA transfer
1875 * @qc: queued command to stop DMA on.
1876 *
1877 * Clears the ATA_DMA_START flag in the bmdma control register
1878 *
1879 * LOCKING:
1880 * Inherited from caller.
1881 */
1882static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1883{
1884 struct ata_port *ap = qc->ap;
1885 void __iomem *port_mmio = mv_ap_base(ap);
1886 u32 cmd;
1887
1888 /* clear start/stop bit */
cae5a29d 1889 cmd = readl(port_mmio + BMDMA_CMD);
da14265e 1890 cmd &= ~ATA_DMA_START;
cae5a29d 1891 writelfl(cmd, port_mmio + BMDMA_CMD);
da14265e
ML
1892
1893 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1894 ata_sff_dma_pause(ap);
1895}
1896
1897/**
1898 * mv_bmdma_status - Read BMDMA status
1899 * @ap: port for which to retrieve DMA status.
1900 *
1901 * Read and return equivalent of the sff BMDMA status register.
1902 *
1903 * LOCKING:
1904 * Inherited from caller.
1905 */
1906static u8 mv_bmdma_status(struct ata_port *ap)
1907{
1908 void __iomem *port_mmio = mv_ap_base(ap);
1909 u32 reg, status;
1910
1911 /*
1912 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1913 * and the ATA_DMA_INTR bit doesn't exist.
1914 */
cae5a29d 1915 reg = readl(port_mmio + BMDMA_STATUS);
da14265e
ML
1916 if (reg & ATA_DMA_ACTIVE)
1917 status = ATA_DMA_ACTIVE;
1918 else
1919 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1920 return status;
1921}
1922
299b3f8d
ML
1923static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1924{
1925 struct ata_taskfile *tf = &qc->tf;
1926 /*
1927 * Workaround for 88SX60x1 FEr SATA#24.
1928 *
1929 * Chip may corrupt WRITEs if multi_count >= 4kB.
1930 * Note that READs are unaffected.
1931 *
1932 * It's not clear if this erratum really means "4K bytes",
1933 * or if it always happens for multi_count > 7
1934 * regardless of device sector_size.
1935 *
1936 * So, for safety, any write with multi_count > 7
1937 * gets converted here into a regular PIO write instead:
1938 */
1939 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1940 if (qc->dev->multi_count > 7) {
1941 switch (tf->command) {
1942 case ATA_CMD_WRITE_MULTI:
1943 tf->command = ATA_CMD_PIO_WRITE;
1944 break;
1945 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1946 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1947 /* fall through */
1948 case ATA_CMD_WRITE_MULTI_EXT:
1949 tf->command = ATA_CMD_PIO_WRITE_EXT;
1950 break;
1951 }
1952 }
1953 }
1954}
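/*
 * Worked example of the conversion above: with 512-byte sectors,
 * multi_count == 8 already means 8 * 512 = 4KiB per DRQ block, so a
 * WRITE MULTIPLE gets downgraded to plain PIO writes, and the FUA
 * variant additionally loses its FUA semantics on the way down.
 */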
1955
05b308e1
BR
1956/**
1957 * mv_qc_prep - Host specific command preparation.
1958 * @qc: queued command to prepare
1959 *
1960 * This routine simply redirects to the general purpose routine
1961 * if command is not DMA. Else, it handles prep of the CRQB
1962 * (command request block), does some sanity checking, and calls
1963 * the SG load routine.
1964 *
1965 * LOCKING:
1966 * Inherited from caller.
1967 */
31961943
BR
1968static void mv_qc_prep(struct ata_queued_cmd *qc)
1969{
1970 struct ata_port *ap = qc->ap;
1971 struct mv_port_priv *pp = ap->private_data;
e1469874 1972 __le16 *cw;
8d2b450d 1973 struct ata_taskfile *tf = &qc->tf;
31961943 1974 u16 flags = 0;
a6432436 1975 unsigned in_index;
31961943 1976
299b3f8d
ML
1977 switch (tf->protocol) {
1978 case ATA_PROT_DMA:
1979 case ATA_PROT_NCQ:
1980 break; /* continue below */
1981 case ATA_PROT_PIO:
1982 mv_rw_multi_errata_sata24(qc);
31961943 1983 return;
299b3f8d
ML
1984 default:
1985 return;
1986 }
20f733e7 1987
31961943
BR
1988 /* Fill in command request block
1989 */
8d2b450d 1990 if (!(tf->flags & ATA_TFLAG_WRITE))
31961943 1991 flags |= CRQB_FLAG_READ;
beec7dbc 1992 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
31961943 1993 flags |= qc->tag << CRQB_TAG_SHIFT;
e49856d8 1994 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
31961943 1995
bdd4ddde 1996 /* get current queue index from software */
fcfb1f77 1997 in_index = pp->req_idx;
a6432436
ML
1998
1999 pp->crqb[in_index].sg_addr =
eb73d558 2000 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
a6432436 2001 pp->crqb[in_index].sg_addr_hi =
eb73d558 2002 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
a6432436 2003 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
31961943 2004
a6432436 2005 cw = &pp->crqb[in_index].ata_cmd[0];
31961943
BR
2006
2007 /* Sadly, the CRQB cannot accommodate all registers--there are
2008 * only 11 words of room...so we must pick and choose required
2009 * registers based on the command. So, we drop feature and
2010 * hob_feature for [RW] DMA commands, but they are needed for
cd12e1f7
ML
2011 * NCQ. NCQ will drop hob_nsect, which is not needed there
2012 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
20f733e7 2013 */
31961943
BR
2014 switch (tf->command) {
2015 case ATA_CMD_READ:
2016 case ATA_CMD_READ_EXT:
2017 case ATA_CMD_WRITE:
2018 case ATA_CMD_WRITE_EXT:
c15d85c8 2019 case ATA_CMD_WRITE_FUA_EXT:
31961943
BR
2020 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2021 break;
31961943
BR
2022 case ATA_CMD_FPDMA_READ:
2023 case ATA_CMD_FPDMA_WRITE:
8b260248 2024 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
31961943
BR
2025 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2026 break;
31961943
BR
2027 default:
2028 /* The only other commands EDMA supports in non-queued and
2029 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2030 * of which are defined/used by Linux. If we get here, this
2031 * driver needs work.
2032 *
2033 * FIXME: modify libata to give qc_prep a return value and
2034 * return error here.
2035 */
2036 BUG_ON(tf->command);
2037 break;
2038 }
2039 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2040 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2041 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2042 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2043 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2044 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2045 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2046 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2047 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
2048
e4e7b892
JG
2049 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2050 return;
2051 mv_fill_sg(qc);
2052}
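/*
 * For reference, the FPDMA (NCQ) case above packs 11 words in total:
 * hob_feature, feature, nsect, hob_lbal, lbal, hob_lbam, lbam,
 * hob_lbah, lbah, device, and finally command (flagged "last").
 * The non-NCQ read/write case packs 10, starting with hob_nsect.
 */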
2053
2054/**
2055 * mv_qc_prep_iie - Host specific command preparation.
2056 * @qc: queued command to prepare
2057 *
2058 * This routine simply redirects to the general purpose routine
2059 * if command is not DMA. Else, it handles prep of the CRQB
2060 * (command request block), does some sanity checking, and calls
2061 * the SG load routine.
2062 *
2063 * LOCKING:
2064 * Inherited from caller.
2065 */
2066static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2067{
2068 struct ata_port *ap = qc->ap;
2069 struct mv_port_priv *pp = ap->private_data;
2070 struct mv_crqb_iie *crqb;
8d2b450d 2071 struct ata_taskfile *tf = &qc->tf;
a6432436 2072 unsigned in_index;
e4e7b892
JG
2073 u32 flags = 0;
2074
8d2b450d
ML
2075 if ((tf->protocol != ATA_PROT_DMA) &&
2076 (tf->protocol != ATA_PROT_NCQ))
e4e7b892
JG
2077 return;
2078
e12bef50 2079 /* Fill in Gen IIE command request block */
8d2b450d 2080 if (!(tf->flags & ATA_TFLAG_WRITE))
e4e7b892
JG
2081 flags |= CRQB_FLAG_READ;
2082
beec7dbc 2083 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
e4e7b892 2084 flags |= qc->tag << CRQB_TAG_SHIFT;
8c0aeb4a 2085 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
e49856d8 2086 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
e4e7b892 2087
bdd4ddde 2088 /* get current queue index from software */
fcfb1f77 2089 in_index = pp->req_idx;
a6432436
ML
2090
2091 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
eb73d558
ML
2092 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2093 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
e4e7b892
JG
2094 crqb->flags = cpu_to_le32(flags);
2095
e4e7b892
JG
2096 crqb->ata_cmd[0] = cpu_to_le32(
2097 (tf->command << 16) |
2098 (tf->feature << 24)
2099 );
2100 crqb->ata_cmd[1] = cpu_to_le32(
2101 (tf->lbal << 0) |
2102 (tf->lbam << 8) |
2103 (tf->lbah << 16) |
2104 (tf->device << 24)
2105 );
2106 crqb->ata_cmd[2] = cpu_to_le32(
2107 (tf->hob_lbal << 0) |
2108 (tf->hob_lbam << 8) |
2109 (tf->hob_lbah << 16) |
2110 (tf->hob_feature << 24)
2111 );
2112 crqb->ata_cmd[3] = cpu_to_le32(
2113 (tf->nsect << 0) |
2114 (tf->hob_nsect << 8)
2115 );
2116
2117 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
31961943 2118 return;
31961943
BR
2119 mv_fill_sg(qc);
2120}
2121
d16ab3f6
ML
2122/**
2123 * mv_sff_check_status - fetch device status, if valid
2124 * @ap: ATA port to fetch status from
2125 *
2126 * When using command issue via mv_qc_issue_fis(),
2127 * the initial ATA_BUSY state does not show up in the
2128 * ATA status (shadow) register. This can confuse libata!
2129 *
2130 * So we have a hook here to fake ATA_BUSY for that situation,
2131 * until the first time a BUSY, DRQ, or ERR bit is seen.
2132 *
2133 * The rest of the time, it simply returns the ATA status register.
2134 */
2135static u8 mv_sff_check_status(struct ata_port *ap)
2136{
2137 u8 stat = ioread8(ap->ioaddr.status_addr);
2138 struct mv_port_priv *pp = ap->private_data;
2139
2140 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2141 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2142 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2143 else
2144 stat = ATA_BUSY;
2145 }
2146 return stat;
2147}
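/*
 * Lifecycle sketch of the fake-BUSY flag: mv_qc_issue_fis() sets
 * MV_PP_FLAG_FAKE_ATA_BUSY for PIO and ATAPI-PIO protocols, this hook
 * then reports ATA_BUSY to libata until the first genuine BUSY/DRQ/ERR
 * status is seen, and mv_qc_issue() also clears the flag defensively
 * before issuing each new command.
 */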
2148
70f8b79c
ML
2149/**
2150 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2151 * @fis: fis to be sent
2152 * @nwords: number of 32-bit words in the fis
2153 */
2154static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2155{
2156 void __iomem *port_mmio = mv_ap_base(ap);
2157 u32 ifctl, old_ifctl, ifstat;
2158 int i, timeout = 200, final_word = nwords - 1;
2159
2160 /* Initiate FIS transmission mode */
cae5a29d 2161 old_ifctl = readl(port_mmio + SATA_IFCTL);
70f8b79c 2162 ifctl = 0x100 | (old_ifctl & 0xf);
cae5a29d 2163 writelfl(ifctl, port_mmio + SATA_IFCTL);
70f8b79c
ML
2164
2165 /* Send all words of the FIS except for the final word */
2166 for (i = 0; i < final_word; ++i)
cae5a29d 2167 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
70f8b79c
ML
2168
2169 /* Flag end-of-transmission, and then send the final word */
cae5a29d
ML
2170 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2171 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
70f8b79c
ML
2172
2173 /*
2174 * Wait for FIS transmission to complete.
2175 * This typically takes just a single iteration.
2176 */
2177 do {
cae5a29d 2178 ifstat = readl(port_mmio + SATA_IFSTAT);
70f8b79c
ML
2179 } while (!(ifstat & 0x1000) && --timeout);
2180
2181 /* Restore original port configuration */
cae5a29d 2182 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
70f8b79c
ML
2183
2184 /* See if it worked */
2185 if ((ifstat & 0x3000) != 0x1000) {
2186 ata_port_printk(ap, KERN_WARNING,
2187 "%s transmission error, ifstat=%08x\n",
2188 __func__, ifstat);
2189 return AC_ERR_OTHER;
2190 }
2191 return 0;
2192}
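/*
 * Note: a host-to-device register FIS is five 32-bit words, which is
 * what mv_qc_issue_fis() below hands us.  Words 0..3 go out in plain
 * transmission mode, the 0x200 bit flags end-of-transmission before
 * the final word, and ifstat bit 0x1000 (with 0x2000 clear) indicates
 * a successful transmission.
 */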
2193
2194/**
2195 * mv_qc_issue_fis - Issue a command directly as a FIS
2196 * @qc: queued command to start
2197 *
2198 * Note that the ATA shadow registers are not updated
2199 * after command issue, so the device will appear "READY"
2200 * if polled, even while it is BUSY processing the command.
2201 *
2202 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2203 *
2204 * Note: we don't get updated shadow regs on *completion*
2205 * of non-data commands. So avoid sending them via this function,
2206 * as they will appear to have completed immediately.
2207 *
2208 * GEN_IIE has special registers that we could get the result tf from,
2209 * but earlier chipsets do not. For now, we ignore those registers.
2210 */
2211static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2212{
2213 struct ata_port *ap = qc->ap;
2214 struct mv_port_priv *pp = ap->private_data;
2215 struct ata_link *link = qc->dev->link;
2216 u32 fis[5];
2217 int err = 0;
2218
2219 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
4c4a90fd 2220 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
70f8b79c
ML
2221 if (err)
2222 return err;
2223
2224 switch (qc->tf.protocol) {
2225 case ATAPI_PROT_PIO:
2226 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2227 /* fall through */
2228 case ATAPI_PROT_NODATA:
2229 ap->hsm_task_state = HSM_ST_FIRST;
2230 break;
2231 case ATA_PROT_PIO:
2232 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2233 if (qc->tf.flags & ATA_TFLAG_WRITE)
2234 ap->hsm_task_state = HSM_ST_FIRST;
2235 else
2236 ap->hsm_task_state = HSM_ST;
2237 break;
2238 default:
2239 ap->hsm_task_state = HSM_ST_LAST;
2240 break;
2241 }
2242
2243 if (qc->tf.flags & ATA_TFLAG_POLLING)
2244 ata_pio_queue_task(ap, qc, 0);
2245 return 0;
2246}
2247
05b308e1
BR
2248/**
2249 * mv_qc_issue - Initiate a command to the host
2250 * @qc: queued command to start
2251 *
2252 * This routine simply redirects to the general purpose routine
2253 * if command is not DMA. Else, it sanity checks our local
2254 * caches of the request producer/consumer indices then enables
2255 * DMA and bumps the request producer index.
2256 *
2257 * LOCKING:
2258 * Inherited from caller.
2259 */
9a3d9eb0 2260static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
31961943 2261{
f48765cc 2262 static int limit_warnings = 10;
c5d3e45a
JG
2263 struct ata_port *ap = qc->ap;
2264 void __iomem *port_mmio = mv_ap_base(ap);
2265 struct mv_port_priv *pp = ap->private_data;
bdd4ddde 2266 u32 in_index;
42ed893d 2267 unsigned int port_irqs;
f48765cc 2268
d16ab3f6
ML
2269 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2270
f48765cc
ML
2271 switch (qc->tf.protocol) {
2272 case ATA_PROT_DMA:
2273 case ATA_PROT_NCQ:
2274 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2275 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2276 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2277
2278 /* Write the request in pointer to kick the EDMA to life */
2279 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
cae5a29d 2280 port_mmio + EDMA_REQ_Q_IN_PTR);
f48765cc 2281 return 0;
31961943 2282
f48765cc 2283 case ATA_PROT_PIO:
c6112bd8
ML
2284 /*
2285 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2286 *
2287 * Someday, we might implement special polling workarounds
2288 * for these, but it all seems rather unnecessary since we
2289 * normally use only DMA for commands which transfer more
2290 * than a single block of data.
2291 *
2292 * Much of the time, this could just work regardless.
2293 * So for now, just log the incident, and allow the attempt.
2294 */
c7843e8f 2295 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
c6112bd8
ML
2296 --limit_warnings;
2297 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2298 ": attempting PIO w/multiple DRQ: "
2299 "this may fail due to h/w errata\n");
2300 }
f48765cc 2301 /* drop through */
42ed893d 2302 case ATA_PROT_NODATA:
f48765cc 2303 case ATAPI_PROT_PIO:
42ed893d
ML
2304 case ATAPI_PROT_NODATA:
2305 if (ap->flags & ATA_FLAG_PIO_POLLING)
2306 qc->tf.flags |= ATA_TFLAG_POLLING;
2307 break;
31961943 2308 }
42ed893d
ML
2309
2310 if (qc->tf.flags & ATA_TFLAG_POLLING)
2311 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2312 else
2313 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2314
2315 /*
2316 * We're about to send a non-EDMA capable command to the
2317 * port. Turn off EDMA so there won't be problems accessing
2318 * shadow block, etc registers.
2319 */
2320 mv_stop_edma(ap);
2321 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2322 mv_pmp_select(ap, qc->dev->link->pmp);
70f8b79c
ML
2323
2324 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2325 struct mv_host_priv *hpriv = ap->host->private_data;
2326 /*
2327 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
40f21b11 2328 *
70f8b79c
ML
2329 * After any NCQ error, the READ_LOG_EXT command
2330 * from libata-eh *must* use mv_qc_issue_fis().
2331 * Otherwise it might fail, due to chip errata.
2332 *
2333 * Rather than special-case it, we'll just *always*
2334 * use this method here for READ_LOG_EXT, making for
2335 * easier testing.
2336 */
2337 if (IS_GEN_II(hpriv))
2338 return mv_qc_issue_fis(qc);
2339 }
42ed893d 2340 return ata_sff_qc_issue(qc);
31961943
BR
2341}
2342
8f767f8a
ML
2343static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2344{
2345 struct mv_port_priv *pp = ap->private_data;
2346 struct ata_queued_cmd *qc;
2347
2348 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2349 return NULL;
2350 qc = ata_qc_from_tag(ap, ap->link.active_tag);
95db5051
ML
2351 if (qc) {
2352 if (qc->tf.flags & ATA_TFLAG_POLLING)
2353 qc = NULL;
2354 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2355 qc = NULL;
2356 }
8f767f8a
ML
2357 return qc;
2358}
2359
29d187bb
ML
2360static void mv_pmp_error_handler(struct ata_port *ap)
2361{
2362 unsigned int pmp, pmp_map;
2363 struct mv_port_priv *pp = ap->private_data;
2364
2365 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2366 /*
2367 * Perform NCQ error analysis on failed PMPs
2368 * before we freeze the port entirely.
2369 *
2370 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2371 */
2372 pmp_map = pp->delayed_eh_pmp_map;
2373 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2374 for (pmp = 0; pmp_map != 0; pmp++) {
2375 unsigned int this_pmp = (1 << pmp);
2376 if (pmp_map & this_pmp) {
2377 struct ata_link *link = &ap->pmp_link[pmp];
2378 pmp_map &= ~this_pmp;
2379 ata_eh_analyze_ncq_error(link);
2380 }
2381 }
2382 ata_port_freeze(ap);
2383 }
2384 sata_pmp_error_handler(ap);
2385}
2386
4c299ca3
ML
2387static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2388{
2389 void __iomem *port_mmio = mv_ap_base(ap);
2390
cae5a29d 2391 return readl(port_mmio + SATA_TESTCTL) >> 16;
4c299ca3
ML
2392}
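/*
 * The upper 16 bits of SATA_TESTCTL apparently carry one "device error"
 * bit per PMP link.  Example: a returned map of 0x0005 means links 0
 * and 2 saw device errors, and hweight16(0x0005) == 2 failed links.
 */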
2393
4c299ca3
ML
2394static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2395{
2396 struct ata_eh_info *ehi;
2397 unsigned int pmp;
2398
2399 /*
2400 * Initialize EH info for PMPs which saw device errors
2401 */
2402 ehi = &ap->link.eh_info;
2403 for (pmp = 0; pmp_map != 0; pmp++) {
2404 unsigned int this_pmp = (1 << pmp);
2405 if (pmp_map & this_pmp) {
2406 struct ata_link *link = &ap->pmp_link[pmp];
2407
2408 pmp_map &= ~this_pmp;
2409 ehi = &link->eh_info;
2410 ata_ehi_clear_desc(ehi);
2411 ata_ehi_push_desc(ehi, "dev err");
2412 ehi->err_mask |= AC_ERR_DEV;
2413 ehi->action |= ATA_EH_RESET;
2414 ata_link_abort(link);
2415 }
2416 }
2417}
2418
06aaca3f
ML
2419static int mv_req_q_empty(struct ata_port *ap)
2420{
2421 void __iomem *port_mmio = mv_ap_base(ap);
2422 u32 in_ptr, out_ptr;
2423
cae5a29d 2424 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
06aaca3f 2425 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
cae5a29d 2426 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
06aaca3f
ML
2427 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2428 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2429}
2430
4c299ca3
ML
2431static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2432{
2433 struct mv_port_priv *pp = ap->private_data;
2434 int failed_links;
2435 unsigned int old_map, new_map;
2436
2437 /*
2438 * Device error during FBS+NCQ operation:
2439 *
2440 * Set a port flag to prevent further I/O being enqueued.
2441 * Leave the EDMA running to drain outstanding commands from this port.
2442 * Perform the post-mortem/EH only when all responses are complete.
2443 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2444 */
2445 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2446 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2447 pp->delayed_eh_pmp_map = 0;
2448 }
2449 old_map = pp->delayed_eh_pmp_map;
2450 new_map = old_map | mv_get_err_pmp_map(ap);
2451
2452 if (old_map != new_map) {
2453 pp->delayed_eh_pmp_map = new_map;
2454 mv_pmp_eh_prep(ap, new_map & ~old_map);
2455 }
c46938cc 2456 failed_links = hweight16(new_map);
4c299ca3
ML
2457
2458 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2459 "failed_links=%d nr_active_links=%d\n",
2460 __func__, pp->delayed_eh_pmp_map,
2461 ap->qc_active, failed_links,
2462 ap->nr_active_links);
2463
06aaca3f 2464 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
4c299ca3
ML
2465 mv_process_crpb_entries(ap, pp);
2466 mv_stop_edma(ap);
2467 mv_eh_freeze(ap);
2468 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2469 return 1; /* handled */
2470 }
2471 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2472 return 1; /* handled */
2473}
2474
2475static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2476{
2477 /*
2478 * Possible future enhancement:
2479 *
2480 * FBS+non-NCQ operation is not yet implemented.
2481 * See related notes in mv_edma_cfg().
2482 *
2483 * Device error during FBS+non-NCQ operation:
2484 *
2485 * We need to snapshot the shadow registers for each failed command.
2486 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2487 */
2488 return 0; /* not handled */
2489}
2490
2491static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2492{
2493 struct mv_port_priv *pp = ap->private_data;
2494
2495 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2496 return 0; /* EDMA was not active: not handled */
2497 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2498 return 0; /* FBS was not active: not handled */
2499
2500 if (!(edma_err_cause & EDMA_ERR_DEV))
2501 return 0; /* non DEV error: not handled */
2502 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2503 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2504 return 0; /* other problems: not handled */
2505
2506 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2507 /*
2508 * EDMA should NOT have self-disabled for this case.
2509 * If it did, then something is wrong elsewhere,
2510 * and we cannot handle it here.
2511 */
2512 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2513 ata_port_printk(ap, KERN_WARNING,
2514 "%s: err_cause=0x%x pp_flags=0x%x\n",
2515 __func__, edma_err_cause, pp->pp_flags);
2516 return 0; /* not handled */
2517 }
2518 return mv_handle_fbs_ncq_dev_err(ap);
2519 } else {
2520 /*
2521 * EDMA should have self-disabled for this case.
2522 * If it did not, then something is wrong elsewhere,
2523 * and we cannot handle it here.
2524 */
2525 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2526 ata_port_printk(ap, KERN_WARNING,
2527 "%s: err_cause=0x%x pp_flags=0x%x\n",
2528 __func__, edma_err_cause, pp->pp_flags);
2529 return 0; /* not handled */
2530 }
2531 return mv_handle_fbs_non_ncq_dev_err(ap);
2532 }
2533 return 0; /* not handled */
2534}
2535
a9010329 2536static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
8f767f8a 2537{
8f767f8a 2538 struct ata_eh_info *ehi = &ap->link.eh_info;
a9010329 2539 char *when = "idle";
8f767f8a 2540
8f767f8a 2541 ata_ehi_clear_desc(ehi);
c9abde12 2542 if (ap->flags & ATA_FLAG_DISABLED) {
a9010329
ML
2543 when = "disabled";
2544 } else if (edma_was_enabled) {
2545 when = "EDMA enabled";
8f767f8a
ML
2546 } else {
2547 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2548 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
a9010329 2549 when = "polling";
8f767f8a 2550 }
a9010329 2551 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
8f767f8a
ML
2552 ehi->err_mask |= AC_ERR_OTHER;
2553 ehi->action |= ATA_EH_RESET;
2554 ata_port_freeze(ap);
2555}
2556
05b308e1
BR
2557/**
2558 * mv_err_intr - Handle error interrupts on the port
2559 * @ap: ATA channel to manipulate
2560 *
8d07379d
ML
2561 * Most cases require a full reset of the chip's state machine,
2562 * which also performs a COMRESET.
2563 * Also, if the port disabled DMA, update our cached copy to match.
05b308e1
BR
2564 *
2565 * LOCKING:
2566 * Inherited from caller.
2567 */
37b9046a 2568static void mv_err_intr(struct ata_port *ap)
31961943
BR
2569{
2570 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde 2571 u32 edma_err_cause, eh_freeze_mask, serr = 0;
e4006077 2572 u32 fis_cause = 0;
bdd4ddde
JG
2573 struct mv_port_priv *pp = ap->private_data;
2574 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde 2575 unsigned int action = 0, err_mask = 0;
9af5c9c9 2576 struct ata_eh_info *ehi = &ap->link.eh_info;
37b9046a
ML
2577 struct ata_queued_cmd *qc;
2578 int abort = 0;
20f733e7 2579
8d07379d 2580 /*
37b9046a 2581 * Read and clear the SError and err_cause bits.
e4006077
ML
2582 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2583 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
8d07379d 2584 */
37b9046a
ML
2585 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2586 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2587
cae5a29d 2588 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
e4006077 2589 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
cae5a29d
ML
2590 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2591 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
e4006077 2592 }
cae5a29d 2593 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
bdd4ddde 2594
4c299ca3
ML
2595 if (edma_err_cause & EDMA_ERR_DEV) {
2596 /*
2597 * Device errors during FIS-based switching operation
2598 * require special handling.
2599 */
2600 if (mv_handle_dev_err(ap, edma_err_cause))
2601 return;
2602 }
2603
37b9046a
ML
2604 qc = mv_get_active_qc(ap);
2605 ata_ehi_clear_desc(ehi);
2606 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2607 edma_err_cause, pp->pp_flags);
e4006077 2608
c443c500 2609 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
e4006077 2610 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
cae5a29d 2611 if (fis_cause & FIS_IRQ_CAUSE_AN) {
c443c500
ML
2612 u32 ec = edma_err_cause &
2613 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2614 sata_async_notification(ap);
2615 if (!ec)
2616 return; /* Just an AN; no need for the nukes */
2617 ata_ehi_push_desc(ehi, "SDB notify");
2618 }
2619 }
bdd4ddde 2620 /*
352fab70 2621 * All generations share these EDMA error cause bits:
bdd4ddde 2622 */
37b9046a 2623 if (edma_err_cause & EDMA_ERR_DEV) {
bdd4ddde 2624 err_mask |= AC_ERR_DEV;
37b9046a
ML
2625 action |= ATA_EH_RESET;
2626 ata_ehi_push_desc(ehi, "dev error");
2627 }
bdd4ddde 2628 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 2629 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
2630 EDMA_ERR_INTRL_PAR)) {
2631 err_mask |= AC_ERR_ATA_BUS;
cf480626 2632 action |= ATA_EH_RESET;
b64bbc39 2633 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
2634 }
2635 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2636 ata_ehi_hotplugged(ehi);
2637 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 2638 "dev disconnect" : "dev connect");
cf480626 2639 action |= ATA_EH_RESET;
bdd4ddde
JG
2640 }
2641
352fab70
ML
2642 /*
2643 * Gen-I has a different SELF_DIS bit,
2644 * different FREEZE bits, and no SERR bit:
2645 */
ee9ccdf7 2646 if (IS_GEN_I(hpriv)) {
bdd4ddde 2647 eh_freeze_mask = EDMA_EH_FREEZE_5;
bdd4ddde 2648 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
bdd4ddde 2649 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 2650 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
2651 }
2652 } else {
2653 eh_freeze_mask = EDMA_EH_FREEZE;
bdd4ddde 2654 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
bdd4ddde 2655 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 2656 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde 2657 }
bdd4ddde 2658 if (edma_err_cause & EDMA_ERR_SERR) {
8d07379d
ML
2659 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2660 err_mask |= AC_ERR_ATA_BUS;
cf480626 2661 action |= ATA_EH_RESET;
bdd4ddde 2662 }
afb0edd9 2663 }
20f733e7 2664
bdd4ddde
JG
2665 if (!err_mask) {
2666 err_mask = AC_ERR_OTHER;
cf480626 2667 action |= ATA_EH_RESET;
bdd4ddde
JG
2668 }
2669
2670 ehi->serror |= serr;
2671 ehi->action |= action;
2672
2673 if (qc)
2674 qc->err_mask |= err_mask;
2675 else
2676 ehi->err_mask |= err_mask;
2677
37b9046a
ML
2678 if (err_mask == AC_ERR_DEV) {
2679 /*
2680 * Cannot do ata_port_freeze() here,
2681 * because it would kill PIO access,
2682 * which is needed for further diagnosis.
2683 */
2684 mv_eh_freeze(ap);
2685 abort = 1;
2686 } else if (edma_err_cause & eh_freeze_mask) {
2687 /*
2688 * Note to self: ata_port_freeze() calls ata_port_abort()
2689 */
bdd4ddde 2690 ata_port_freeze(ap);
37b9046a
ML
2691 } else {
2692 abort = 1;
2693 }
2694
2695 if (abort) {
2696 if (qc)
2697 ata_link_abort(qc->dev->link);
2698 else
2699 ata_port_abort(ap);
2700 }
bdd4ddde
JG
2701}
2702
fcfb1f77
ML
2703static void mv_process_crpb_response(struct ata_port *ap,
2704 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2705{
2706 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2707
2708 if (qc) {
2709 u8 ata_status;
2710 u16 edma_status = le16_to_cpu(response->flags);
2711 /*
2712 * edma_status from a response queue entry:
cae5a29d 2713 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
fcfb1f77
ML
2714 * MSB is saved ATA status from command completion.
2715 */
2716 if (!ncq_enabled) {
2717 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2718 if (err_cause) {
2719 /*
2720 * Error will be seen/handled by mv_err_intr().
2721 * So do nothing at all here.
2722 */
2723 return;
2724 }
2725 }
2726 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
37b9046a
ML
2727 if (!ac_err_mask(ata_status))
2728 ata_qc_complete(qc);
2729 /* else: leave it for mv_err_intr() */
fcfb1f77
ML
2730 } else {
2731 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2732 __func__, tag);
2733 }
2734}
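/*
 * Example of the status split above (assuming CRPB_FLAG_STATUS_SHIFT
 * is 8): response->flags == 0x5000 yields ata_status 0x50 (DRDY set,
 * ERR clear) and err_cause 0x00, so the qc completes normally; any
 * nonzero low byte instead leaves the qc for mv_err_intr() to handle.
 */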
2735
2736static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
bdd4ddde
JG
2737{
2738 void __iomem *port_mmio = mv_ap_base(ap);
2739 struct mv_host_priv *hpriv = ap->host->private_data;
fcfb1f77 2740 u32 in_index;
bdd4ddde 2741 bool work_done = false;
fcfb1f77 2742 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
bdd4ddde 2743
fcfb1f77 2744 /* Get the hardware queue position index */
cae5a29d 2745 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
bdd4ddde
JG
2746 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2747
fcfb1f77
ML
2748 /* Process new responses since the last time we looked */
2749 while (in_index != pp->resp_idx) {
6c1153e0 2750 unsigned int tag;
fcfb1f77 2751 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
bdd4ddde 2752
fcfb1f77 2753 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
bdd4ddde 2754
fcfb1f77
ML
2755 if (IS_GEN_I(hpriv)) {
2756 /* 50xx: no NCQ, only one command active at a time */
9af5c9c9 2757 tag = ap->link.active_tag;
fcfb1f77
ML
2758 } else {
2759 /* Gen II/IIE: get command tag from CRPB entry */
2760 tag = le16_to_cpu(response->id) & 0x1f;
bdd4ddde 2761 }
fcfb1f77 2762 mv_process_crpb_response(ap, response, tag, ncq_enabled);
bdd4ddde 2763 work_done = true;
bdd4ddde
JG
2764 }
2765
352fab70 2766 /* Update the software queue position index in hardware */
bdd4ddde
JG
2767 if (work_done)
2768 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
fcfb1f77 2769 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
cae5a29d 2770 port_mmio + EDMA_RSP_Q_OUT_PTR);
20f733e7
BR
2771}
2772
a9010329
ML
2773static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2774{
2775 struct mv_port_priv *pp;
2776 int edma_was_enabled;
2777
2778 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2779 mv_unexpected_intr(ap, 0);
2780 return;
2781 }
2782 /*
2783 * Grab a snapshot of the EDMA_EN flag setting,
2784 * so that we have a consistent view for this port,
2785 * even if one of the routines we call changes it.
2786 */
2787 pp = ap->private_data;
2788 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2789 /*
2790 * Process completed CRPB response(s) before other events.
2791 */
2792 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2793 mv_process_crpb_entries(ap, pp);
4c299ca3
ML
2794 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2795 mv_handle_fbs_ncq_dev_err(ap);
a9010329
ML
2796 }
2797 /*
2798 * Handle chip-reported errors, or continue on to handle PIO.
2799 */
2800 if (unlikely(port_cause & ERR_IRQ)) {
2801 mv_err_intr(ap);
2802 } else if (!edma_was_enabled) {
2803 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2804 if (qc)
2805 ata_sff_host_intr(ap, qc);
2806 else
2807 mv_unexpected_intr(ap, edma_was_enabled);
2808 }
2809}
2810
05b308e1
BR
2811/**
2812 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 2813 * @host: host specific structure
7368f919 2814 * @main_irq_cause: Main interrupt cause register for the chip.
05b308e1
BR
2815 *
2816 * LOCKING:
2817 * Inherited from caller.
2818 */
7368f919 2819static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
20f733e7 2820{
f351b2d6 2821 struct mv_host_priv *hpriv = host->private_data;
eabd5eb1 2822 void __iomem *mmio = hpriv->base, *hc_mmio;
a3718c1f 2823 unsigned int handled = 0, port;
20f733e7 2824
2b748a0a
ML
2825 /* If asserted, clear the "all ports" IRQ coalescing bit */
2826 if (main_irq_cause & ALL_PORTS_COAL_DONE)
cae5a29d 2827 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2b748a0a 2828
a3718c1f 2829 for (port = 0; port < hpriv->n_ports; port++) {
cca3974e 2830 struct ata_port *ap = host->ports[port];
eabd5eb1
ML
2831 unsigned int p, shift, hardport, port_cause;
2832
a3718c1f 2833 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
a3718c1f 2834 /*
eabd5eb1
ML
2835 * Each hc within the host has its own hc_irq_cause register,
2836 * where the interrupting ports bits get ack'd.
a3718c1f 2837 */
eabd5eb1
ML
2838 if (hardport == 0) { /* first port on this hc ? */
2839 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2840 u32 port_mask, ack_irqs;
2841 /*
2842 * Skip this entire hc if nothing pending for any ports
2843 */
2844 if (!hc_cause) {
2845 port += MV_PORTS_PER_HC - 1;
2846 continue;
2847 }
2848 /*
2849 * We don't need/want to read the hc_irq_cause register,
2850 * because doing so hurts performance, and
2851 * main_irq_cause already gives us everything we need.
2852 *
2853 * But we do have to *write* to the hc_irq_cause to ack
2854 * the ports that we are handling this time through.
2855 *
2856 * This requires that we create a bitmap for those
2857 * ports which interrupted us, and use that bitmap
2858 * to ack (only) those ports via hc_irq_cause.
2859 */
2860 ack_irqs = 0;
2b748a0a
ML
2861 if (hc_cause & PORTS_0_3_COAL_DONE)
2862 ack_irqs = HC_COAL_IRQ;
eabd5eb1
ML
2863 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2864 if ((port + p) >= hpriv->n_ports)
2865 break;
2866 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2867 if (hc_cause & port_mask)
2868 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2869 }
a3718c1f 2870 hc_mmio = mv_hc_base_from_port(mmio, port);
cae5a29d 2871 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
a3718c1f
ML
2872 handled = 1;
2873 }
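/*
 * Ack-bitmap example for the code above: if hardports 0 and 2 of this
 * hc have cause bits set, ack_irqs becomes ((DMA_IRQ | DEV_IRQ) << 0) |
 * ((DMA_IRQ | DEV_IRQ) << 2), and writing ~ack_irqs leaves the other
 * ports' cause bits untouched, the register evidently being of the
 * write-zero-to-clear variety.
 */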
8f767f8a 2874 /*
a9010329 2875 * Handle interrupts signalled for this port:
8f767f8a 2876 */
a9010329
ML
2877 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2878 if (port_cause)
2879 mv_port_intr(ap, port_cause);
20f733e7 2880 }
a3718c1f 2881 return handled;
20f733e7
BR
2882}
2883
a3718c1f 2884static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
bdd4ddde 2885{
02a121da 2886 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
2887 struct ata_port *ap;
2888 struct ata_queued_cmd *qc;
2889 struct ata_eh_info *ehi;
2890 unsigned int i, err_mask, printed = 0;
2891 u32 err_cause;
2892
cae5a29d 2893 err_cause = readl(mmio + hpriv->irq_cause_offset);
bdd4ddde
JG
2894
2895 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2896 err_cause);
2897
2898 DPRINTK("All regs @ PCI error\n");
2899 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2900
cae5a29d 2901 writelfl(0, mmio + hpriv->irq_cause_offset);
bdd4ddde
JG
2902
2903 for (i = 0; i < host->n_ports; i++) {
2904 ap = host->ports[i];
936fd732 2905 if (!ata_link_offline(&ap->link)) {
9af5c9c9 2906 ehi = &ap->link.eh_info;
bdd4ddde
JG
2907 ata_ehi_clear_desc(ehi);
2908 if (!printed++)
2909 ata_ehi_push_desc(ehi,
2910 "PCI err cause 0x%08x", err_cause);
2911 err_mask = AC_ERR_HOST_BUS;
cf480626 2912 ehi->action = ATA_EH_RESET;
9af5c9c9 2913 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
2914 if (qc)
2915 qc->err_mask |= err_mask;
2916 else
2917 ehi->err_mask |= err_mask;
2918
2919 ata_port_freeze(ap);
2920 }
2921 }
a3718c1f 2922 return 1; /* handled */
bdd4ddde
JG
2923}
2924
05b308e1 2925/**
c5d3e45a 2926 * mv_interrupt - Main interrupt event handler
05b308e1
BR
2927 * @irq: unused
2928 * @dev_instance: private data; in this case the host structure
05b308e1
BR
2929 *
2930 * Read the read-only register to determine if any host
2931 * controllers have pending interrupts. If so, call lower level
2932 * routine to handle. Also check for PCI errors which are only
2933 * reported here.
2934 *
8b260248 2935 * LOCKING:
cca3974e 2936 * This routine holds the host lock while processing pending
05b308e1
BR
2937 * interrupts.
2938 */
7d12e780 2939static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 2940{
cca3974e 2941 struct ata_host *host = dev_instance;
f351b2d6 2942 struct mv_host_priv *hpriv = host->private_data;
a3718c1f 2943 unsigned int handled = 0;
6d3c30ef 2944 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
96e2c487 2945 u32 main_irq_cause, pending_irqs;
20f733e7 2946
646a4da5 2947 spin_lock(&host->lock);
6d3c30ef
ML
2948
2949 /* for MSI: block new interrupts while in here */
2950 if (using_msi)
2b748a0a 2951 mv_write_main_irq_mask(0, hpriv);
6d3c30ef 2952
7368f919 2953 main_irq_cause = readl(hpriv->main_irq_cause_addr);
96e2c487 2954 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
352fab70
ML
2955 /*
2956 * Deal with cases where we either have nothing pending, or have read
2957 * a bogus register value which can indicate HW removal or PCI fault.
20f733e7 2958 */
a44253d2 2959 if (pending_irqs && main_irq_cause != 0xffffffffU) {
1f398472 2960 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
a3718c1f
ML
2961 handled = mv_pci_error(host, hpriv->base);
2962 else
a44253d2 2963 handled = mv_host_intr(host, pending_irqs);
bdd4ddde 2964 }
6d3c30ef
ML
2965
2966 /* for MSI: unmask; interrupt cause bits will retrigger now */
2967 if (using_msi)
2b748a0a 2968 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
6d3c30ef 2969
9d51af7b
ML
2970 spin_unlock(&host->lock);
2971
20f733e7
BR
2972 return IRQ_RETVAL(handled);
2973}
2974
c9d39130
JG
2975static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2976{
2977 unsigned int ofs;
2978
2979 switch (sc_reg_in) {
2980 case SCR_STATUS:
2981 case SCR_ERROR:
2982 case SCR_CONTROL:
2983 ofs = sc_reg_in * sizeof(u32);
2984 break;
2985 default:
2986 ofs = 0xffffffffU;
2987 break;
2988 }
2989 return ofs;
2990}
2991
82ef04fb 2992static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
c9d39130 2993{
82ef04fb 2994 struct mv_host_priv *hpriv = link->ap->host->private_data;
f351b2d6 2995 void __iomem *mmio = hpriv->base;
82ef04fb 2996 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
c9d39130
JG
2997 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2998
da3dbb17
TH
2999 if (ofs != 0xffffffffU) {
3000 *val = readl(addr + ofs);
3001 return 0;
3002 } else
3003 return -EINVAL;
c9d39130
JG
3004}
3005
82ef04fb 3006static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
c9d39130 3007{
82ef04fb 3008 struct mv_host_priv *hpriv = link->ap->host->private_data;
f351b2d6 3009 void __iomem *mmio = hpriv->base;
82ef04fb 3010 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
c9d39130
JG
3011 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3012
da3dbb17 3013 if (ofs != 0xffffffffU) {
0d5ff566 3014 writelfl(val, addr + ofs);
da3dbb17
TH
3015 return 0;
3016 } else
3017 return -EINVAL;
c9d39130
JG
3018}
3019
7bb3c529 3020static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 3021{
7bb3c529 3022 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
3023 int early_5080;
3024
44c10138 3025 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
3026
3027 if (!early_5080) {
3028 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3029 tmp |= (1 << 0);
3030 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3031 }
3032
7bb3c529 3033 mv_reset_pci_bus(host, mmio);
522479fb
JG
3034}
3035
3036static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3037{
cae5a29d 3038 writel(0x0fcfffff, mmio + FLASH_CTL);
522479fb
JG
3039}
3040
47c2b677 3041static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
3042 void __iomem *mmio)
3043{
c9d39130
JG
3044 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3045 u32 tmp;
3046
3047 tmp = readl(phy_mmio + MV5_PHY_MODE);
3048
3049 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3050 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
3051}
3052
47c2b677 3053static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 3054{
522479fb
JG
3055 u32 tmp;
3056
cae5a29d 3057 writel(0, mmio + GPIO_PORT_CTL);
522479fb
JG
3058
3059 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3060
3061 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3062 tmp |= ~(1 << 0);
3063 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
3064}
3065
2a47ce06
JG
3066static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3067 unsigned int port)
bca1c4eb 3068{
c9d39130
JG
3069 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3070 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3071 u32 tmp;
3072 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3073
3074 if (fix_apm_sq) {
cae5a29d 3075 tmp = readl(phy_mmio + MV5_LTMODE);
c9d39130 3076 tmp |= (1 << 19);
cae5a29d 3077 writel(tmp, phy_mmio + MV5_LTMODE);
c9d39130 3078
cae5a29d 3079 tmp = readl(phy_mmio + MV5_PHY_CTL);
c9d39130
JG
3080 tmp &= ~0x3;
3081 tmp |= 0x1;
cae5a29d 3082 writel(tmp, phy_mmio + MV5_PHY_CTL);
c9d39130
JG
3083 }
3084
3085 tmp = readl(phy_mmio + MV5_PHY_MODE);
3086 tmp &= ~mask;
3087 tmp |= hpriv->signal[port].pre;
3088 tmp |= hpriv->signal[port].amps;
3089 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
3090}
3091
c9d39130
JG
3092
3093#undef ZERO
3094#define ZERO(reg) writel(0, port_mmio + (reg))
3095static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3096 unsigned int port)
3097{
3098 void __iomem *port_mmio = mv_port_base(mmio, port);
3099
e12bef50 3100 mv_reset_channel(hpriv, mmio, port);
c9d39130
JG
3101
3102 ZERO(0x028); /* command */
cae5a29d 3103 writel(0x11f, port_mmio + EDMA_CFG);
c9d39130
JG
3104 ZERO(0x004); /* timer */
3105 ZERO(0x008); /* irq err cause */
3106 ZERO(0x00c); /* irq err mask */
3107 ZERO(0x010); /* rq bah */
3108 ZERO(0x014); /* rq inp */
3109 ZERO(0x018); /* rq outp */
3110 ZERO(0x01c); /* respq bah */
3111 ZERO(0x024); /* respq outp */
3112 ZERO(0x020); /* respq inp */
3113 ZERO(0x02c); /* test control */
cae5a29d 3114 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
c9d39130
JG
3115}
3116#undef ZERO
3117
3118#define ZERO(reg) writel(0, hc_mmio + (reg))
3119static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3120 unsigned int hc)
47c2b677 3121{
c9d39130
JG
3122 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3123 u32 tmp;
3124
3125 ZERO(0x00c);
3126 ZERO(0x010);
3127 ZERO(0x014);
3128 ZERO(0x018);
3129
3130 tmp = readl(hc_mmio + 0x20);
3131 tmp &= 0x1c1c1c1c;
3132 tmp |= 0x03030303;
3133 writel(tmp, hc_mmio + 0x20);
3134}
3135#undef ZERO
3136
3137static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3138 unsigned int n_hc)
3139{
3140 unsigned int hc, port;
3141
3142 for (hc = 0; hc < n_hc; hc++) {
3143 for (port = 0; port < MV_PORTS_PER_HC; port++)
3144 mv5_reset_hc_port(hpriv, mmio,
3145 (hc * MV_PORTS_PER_HC) + port);
3146
3147 mv5_reset_one_hc(hpriv, mmio, hc);
3148 }
3149
3150 return 0;
47c2b677
JG
3151}
3152
101ffae2
JG
3153#undef ZERO
3154#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 3155static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 3156{
02a121da 3157 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
3158 u32 tmp;
3159
cae5a29d 3160 tmp = readl(mmio + MV_PCI_MODE);
101ffae2 3161 tmp &= 0xff00ffff;
cae5a29d 3162 writel(tmp, mmio + MV_PCI_MODE);
101ffae2
JG
3163
3164 ZERO(MV_PCI_DISC_TIMER);
3165 ZERO(MV_PCI_MSI_TRIGGER);
cae5a29d 3166 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
101ffae2 3167 ZERO(MV_PCI_SERR_MASK);
cae5a29d
ML
3168 ZERO(hpriv->irq_cause_offset);
3169 ZERO(hpriv->irq_mask_offset);
101ffae2
JG
3170 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3171 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3172 ZERO(MV_PCI_ERR_ATTRIBUTE);
3173 ZERO(MV_PCI_ERR_COMMAND);
3174}
3175#undef ZERO
3176
3177static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3178{
3179 u32 tmp;
3180
3181 mv5_reset_flash(hpriv, mmio);
3182
cae5a29d 3183 tmp = readl(mmio + GPIO_PORT_CTL);
101ffae2
JG
3184 tmp &= 0x3;
3185 tmp |= (1 << 5) | (1 << 6);
cae5a29d 3186 writel(tmp, mmio + GPIO_PORT_CTL);
101ffae2
JG
3187}
3188
3189/**
3190 * mv6_reset_hc - Perform the 6xxx global soft reset
3191 * @mmio: base address of the HBA
3192 *
3193 * This routine only applies to 6xxx parts.
3194 *
3195 * LOCKING:
3196 * Inherited from caller.
3197 */
c9d39130
JG
3198static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3199 unsigned int n_hc)
101ffae2 3200{
cae5a29d 3201 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
101ffae2
JG
3202 int i, rc = 0;
3203 u32 t;
3204
3205 /* Following procedure defined in PCI "main command and status
3206 * register" table.
3207 */
3208 t = readl(reg);
3209 writel(t | STOP_PCI_MASTER, reg);
3210
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *re-enable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}

/**
 * soc_is_65n - check whether the SoC is a 65 nm device
 * @hpriv: host private data
 *
 * Detect the SoC type by reading the PHYCFG_OFS register, which exists
 * only on the 65 nm devices and should contain a non-zero value there;
 * reading it on older devices yields 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;
	return false;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

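/* Route subsequent commands to the given port-multiplier device: the low
 * nibble of SATA_IFCTL selects the PMP target port.
 */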
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
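	/* retry until SStatus reads 0x0 (no device) or 0x113/0x123 (link up) */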
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup */
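	/* the taskfile shadow registers sit in the port window, one u32 apart */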
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
		readl(port_mmio + EDMA_ERR_IRQ_MASK));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0;	/* not okay */
	}
	return 1;	/* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
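			/*
			 * Worked example with a hypothetical capacity: a
			 * drive of 0x12345678 sectors would have its RAID
			 * metadata at sector 0x12345678 & ~0xfffff =
			 * 0x12300000, i.e. the capacity rounded down to a
			 * 0x100000-sector (512 MiB) boundary.
			 */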
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset = PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset = PCI_IRQ_MASK;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
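	/* dmam_pool_create() pools are devres-managed: no explicit destroy needed */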
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

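		/*
		 * WINDOW_CTRL as composed below:
		 *   [31:16] high bits of (size - 1), i.e. 64 KB granules
		 *   [15:8]  mbus attribute
		 *   [7:4]   mbus DRAM target id
		 *   [0]     window enable
		 */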
		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell
 * host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation.
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	hpriv->base -= SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

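	/*
	 * Try 64-bit DMA first.  If the streaming mask sticks but the
	 * coherent mask does not, fall back to a 32-bit coherent mask;
	 * if 64-bit is unavailable altogether, use 32-bit for both.
	 */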
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * which errata to work around
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
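	/* register the PCI driver first; unwind it if platform registration fails */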
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);