/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the overhead
  reduced by interrupt mitigation is quite often not worth the latency
  cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

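	/* e.g. global port 6: HC = 6 >> MV_PORT_HC_SHIFT = 1, and
	 * hard port = 6 & MV_PORT_MASK = 2 within that HC.
	 */
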
	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

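/* Per-family hardware hooks: 50xx parts use mv5xxx_ops, 60xx/7042
 * parts use mv6xxx_ops (both defined below).
 */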
struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_slave_config(struct scsi_device *sdev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= mv_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= mv_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

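/* The driver_data field of each PCI id below carries the chip_* enum
 * value, which indexes mv_port_info[] above.
 */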
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

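/* Try a fully 64-bit DMA mask first; if the consistent (coherent) mask
 * cannot also be set to 64 bits, fall back to a 32-bit consistent mask,
 * and if 64-bit streaming DMA is unavailable use 32-bit masks for both.
 */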
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

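/* Each SATAHC register window holds the arbiter block first, then one
 * MV_PORT_REG_SZ (8KB) block per hard port: e.g. port 5 resolves to
 * SATAHC1 base + MV_SATAHC_ARBTR_REG_SZ + 1 * MV_PORT_REG_SZ.
 */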
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static int mv_slave_config(struct scsi_device *sdev)
{
	int rc = ata_scsi_slave_config(sdev);
	if (rc)
		return rc;

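	/* Each S/G entry may be split once on a 64KB boundary by
	 * mv_fill_sg(), which is presumably why only half of the ePRD
	 * slots are advertised here.
	 */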
	blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);

	return 0;	/* scsi layer doesn't check return value, sigh */
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

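	/* The queue in/out pointer registers combine the ring base address
	 * with the current slot: shifting the slot number by the PTR_SHIFT
	 * (5 for 32-byte CRQBs, 3 for 8-byte CRPBs) gives its byte offset
	 * within the 1KB- or 256B-aligned ring.
	 */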
	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

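/* Locking wrapper: takes the host lock around __mv_stop_dma() for
 * callers that do not already hold it.
 */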
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

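		/* The ePRD length field is only 16 bits wide, so split any
		 * segment crossing a 64KB boundary into chunks of at most
		 * 0x10000 bytes (a stored length of 0 encodes a full 64KB).
		 */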
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
		}

	}
}

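/* Pack one shadow-register write into a 16-bit CRQB command word: the
 * data byte in bits 7:0, the register address at CRQB_CMD_ADDR_SHIFT,
 * the CS bits, and CRQB_CMD_LAST set on the final word of the request.
 */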
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, if any
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
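		/* e.g. port 5: shift = 11, so its error/done bits land at
		 * main-cause bits 11 and 12 (HC1's ports start at bit 9).
		 */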
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

05b308e1 1709/**
c5d3e45a 1710 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1711 * @irq: unused
1712 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1713 *
1714 * Read the read-only main interrupt cause register to determine
1715 * whether any host controllers have pending interrupts. If so,
1716 * call the lower-level routine to handle them. Also check for
1717 * PCI errors, which are reported only here.
1718 *
8b260248 1719 * LOCKING:
cca3974e 1720 * This routine holds the host lock while processing pending
05b308e1
BR
1721 * interrupts.
1722 */
7d12e780 1723static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1724{
cca3974e 1725 struct ata_host *host = dev_instance;
20f733e7 1726 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1727 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
20f733e7
BR
1728 u32 irq_stat;
1729
20f733e7 1730 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
20f733e7
BR
1731
1732 /* check the cases where we either have nothing pending or have read
1733 * a bogus register value which can indicate HW removal or PCI fault
1734 */
35177265 1735 if (!irq_stat || (0xffffffffU == irq_stat))
20f733e7 1736 return IRQ_NONE;
20f733e7 1737
cca3974e
JG
1738 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1739 spin_lock(&host->lock);
20f733e7 1740
bdd4ddde
JG
1741 if (unlikely(irq_stat & PCI_ERR)) {
1742 mv_pci_error(host, mmio);
1743 handled = 1;
1744 goto out_unlock; /* skip all other HC irq handling */
1745 }
1746
20f733e7
BR
1747 for (hc = 0; hc < n_hcs; hc++) {
1748 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1749 if (relevant) {
cca3974e 1750 mv_host_intr(host, relevant, hc);
bdd4ddde 1751 handled = 1;
20f733e7
BR
1752 }
1753 }
615ab953 1754
bdd4ddde 1755out_unlock:
cca3974e 1756 spin_unlock(&host->lock);
20f733e7
BR
1757
1758 return IRQ_RETVAL(handled);
1759}
1760
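/*
 * Editor's illustrative sketch (not part of the driver): each host
 * controller owns a 9-bit window in the main cause register, selected
 * by HC0_IRQ_PEND << (hc * HC_SHIFT) as in mv_interrupt() above.
 * Assumes HC0_IRQ_PEND is 0x1ff and HC_SHIFT is 9, their values
 * elsewhere in this file.
 */
#include <stdio.h>

#define HC0_IRQ_PEND	0x1ff
#define HC_SHIFT	9

int main(void)
{
	unsigned int hc;

	for (hc = 0; hc < 2; hc++)
		printf("hc%u window mask: 0x%08x\n",
		       hc, HC0_IRQ_PEND << (hc * HC_SHIFT));
	return 0;
}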
c9d39130
JG
1761static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1762{
1763 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1764 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1765
1766 return hc_mmio + ofs;
1767}
1768
1769static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1770{
1771 unsigned int ofs;
1772
1773 switch (sc_reg_in) {
1774 case SCR_STATUS:
1775 case SCR_ERROR:
1776 case SCR_CONTROL:
1777 ofs = sc_reg_in * sizeof(u32);
1778 break;
1779 default:
1780 ofs = 0xffffffffU;
1781 break;
1782 }
1783 return ofs;
1784}
1785
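/*
 * Editor's illustrative sketch (not part of the driver): on 50xx
 * parts the SCR registers sit in consecutive 32-bit slots inside the
 * per-port PHY block, so the offset is simply the SCR index times 4,
 * with anything other than STATUS/ERROR/CONTROL rejected.  Assumes
 * the libata numbering SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2.
 */
#include <stdio.h>

enum { SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2 };

int main(void)
{
	unsigned int reg;

	for (reg = SCR_STATUS; reg <= SCR_CONTROL; reg++)
		printf("SCR %u -> offset 0x%02x\n", reg, reg * 4);
	return 0;
}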
da3dbb17 1786static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1787{
0d5ff566
TH
1788 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1789 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1790 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1791
da3dbb17
TH
1792 if (ofs != 0xffffffffU) {
1793 *val = readl(addr + ofs);
1794 return 0;
1795 } else
1796 return -EINVAL;
c9d39130
JG
1797}
1798
da3dbb17 1799static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1800{
0d5ff566
TH
1801 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1802 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1803 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1804
da3dbb17 1805 if (ofs != 0xffffffffU) {
0d5ff566 1806 writelfl(val, addr + ofs);
da3dbb17
TH
1807 return 0;
1808 } else
1809 return -EINVAL;
c9d39130
JG
1810}
1811
522479fb
JG
1812static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1813{
522479fb
JG
1814 int early_5080;
1815
44c10138 1816 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1817
1818 if (!early_5080) {
1819 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1820 tmp |= (1 << 0);
1821 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1822 }
1823
1824 mv_reset_pci_bus(pdev, mmio);
1825}
1826
1827static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1828{
1829 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1830}
1831
47c2b677 1832static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1833 void __iomem *mmio)
1834{
c9d39130
JG
1835 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1836 u32 tmp;
1837
1838 tmp = readl(phy_mmio + MV5_PHY_MODE);
1839
1840 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1841 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1842}
1843
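/*
 * Editor's illustrative sketch (not part of the driver): the preamp
 * values saved above are kept still-shifted in their register
 * positions (pre in bits 12:11, amps in bits 7:5), so they can later
 * be OR'ed straight back into MV5_PHY_MODE.  Demo of the masking:
 */
#include <stdio.h>

int main(void)
{
	unsigned int tmp = 0xffff;	/* pretend MV5_PHY_MODE readout */

	printf("pre  = 0x%04x (bits 12:11)\n", tmp & 0x1800);
	printf("amps = 0x%04x (bits 7:5)\n", tmp & 0xe0);
	return 0;
}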
47c2b677 1844static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1845{
522479fb
JG
1846 u32 tmp;
1847
1848 writel(0, mmio + MV_GPIO_PORT_CTL);
1849
1850 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1851
1852 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1853 tmp |= ~(1 << 0);
1854 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1855}
1856
2a47ce06
JG
1857static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1858 unsigned int port)
bca1c4eb 1859{
c9d39130
JG
1860 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1861 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1862 u32 tmp;
1863 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1864
1865 if (fix_apm_sq) {
1866 tmp = readl(phy_mmio + MV5_LT_MODE);
1867 tmp |= (1 << 19);
1868 writel(tmp, phy_mmio + MV5_LT_MODE);
1869
1870 tmp = readl(phy_mmio + MV5_PHY_CTL);
1871 tmp &= ~0x3;
1872 tmp |= 0x1;
1873 writel(tmp, phy_mmio + MV5_PHY_CTL);
1874 }
1875
1876 tmp = readl(phy_mmio + MV5_PHY_MODE);
1877 tmp &= ~mask;
1878 tmp |= hpriv->signal[port].pre;
1879 tmp |= hpriv->signal[port].amps;
1880 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1881}
1882
c9d39130
JG
1883
1884#undef ZERO
1885#define ZERO(reg) writel(0, port_mmio + (reg))
1886static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1887 unsigned int port)
1888{
1889 void __iomem *port_mmio = mv_port_base(mmio, port);
1890
1891 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1892
1893 mv_channel_reset(hpriv, mmio, port);
1894
1895 ZERO(0x028); /* command */
1896 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1897 ZERO(0x004); /* timer */
1898 ZERO(0x008); /* irq err cause */
1899 ZERO(0x00c); /* irq err mask */
1900 ZERO(0x010); /* rq bah */
1901 ZERO(0x014); /* rq inp */
1902 ZERO(0x018); /* rq outp */
1903 ZERO(0x01c); /* respq bah */
1904 ZERO(0x024); /* respq outp */
1905 ZERO(0x020); /* respq inp */
1906 ZERO(0x02c); /* test control */
1907 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1908}
1909#undef ZERO
1910
1911#define ZERO(reg) writel(0, hc_mmio + (reg))
1912static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1913 unsigned int hc)
47c2b677 1914{
c9d39130
JG
1915 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1916 u32 tmp;
1917
1918 ZERO(0x00c);
1919 ZERO(0x010);
1920 ZERO(0x014);
1921 ZERO(0x018);
1922
1923 tmp = readl(hc_mmio + 0x20);
1924 tmp &= 0x1c1c1c1c;
1925 tmp |= 0x03030303;
1926 writel(tmp, hc_mmio + 0x20);
1927}
1928#undef ZERO
1929
1930static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1931 unsigned int n_hc)
1932{
1933 unsigned int hc, port;
1934
1935 for (hc = 0; hc < n_hc; hc++) {
1936 for (port = 0; port < MV_PORTS_PER_HC; port++)
1937 mv5_reset_hc_port(hpriv, mmio,
1938 (hc * MV_PORTS_PER_HC) + port);
1939
1940 mv5_reset_one_hc(hpriv, mmio, hc);
1941 }
1942
1943 return 0;
47c2b677
JG
1944}
1945
101ffae2
JG
1946#undef ZERO
1947#define ZERO(reg) writel(0, mmio + (reg))
1948static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1949{
1950 u32 tmp;
1951
1952 tmp = readl(mmio + MV_PCI_MODE);
1953 tmp &= 0xff00ffff;
1954 writel(tmp, mmio + MV_PCI_MODE);
1955
1956 ZERO(MV_PCI_DISC_TIMER);
1957 ZERO(MV_PCI_MSI_TRIGGER);
1958 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1959 ZERO(HC_MAIN_IRQ_MASK_OFS);
1960 ZERO(MV_PCI_SERR_MASK);
1961 ZERO(PCI_IRQ_CAUSE_OFS);
1962 ZERO(PCI_IRQ_MASK_OFS);
1963 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1964 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1965 ZERO(MV_PCI_ERR_ATTRIBUTE);
1966 ZERO(MV_PCI_ERR_COMMAND);
1967}
1968#undef ZERO
1969
1970static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1971{
1972 u32 tmp;
1973
1974 mv5_reset_flash(hpriv, mmio);
1975
1976 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1977 tmp &= 0x3;
1978 tmp |= (1 << 5) | (1 << 6);
1979 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1980}
1981
1982/**
1983 * mv6_reset_hc - Perform the 6xxx global soft reset
1984 * @mmio: base address of the HBA
1985 *
1986 * This routine only applies to 6xxx parts.
1987 *
1988 * LOCKING:
1989 * Inherited from caller.
1990 */
c9d39130
JG
1991static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1992 unsigned int n_hc)
101ffae2
JG
1993{
1994 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1995 int i, rc = 0;
1996 u32 t;
1997
1998 /* Follow the procedure defined in the PCI "main command and
1999 * status register" table.
2000 */
2001 t = readl(reg);
2002 writel(t | STOP_PCI_MASTER, reg);
2003
2004 for (i = 0; i < 1000; i++) {
2005 udelay(1);
2006 t = readl(reg);
2007 if (PCI_MASTER_EMPTY & t) {
2008 break;
2009 }
2010 }
2011 if (!(PCI_MASTER_EMPTY & t)) {
2012 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2013 rc = 1;
2014 goto done;
2015 }
2016
2017 /* set reset */
2018 i = 5;
2019 do {
2020 writel(t | GLOB_SFT_RST, reg);
2021 t = readl(reg);
2022 udelay(1);
2023 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2024
2025 if (!(GLOB_SFT_RST & t)) {
2026 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2027 rc = 1;
2028 goto done;
2029 }
2030
2031 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2032 i = 5;
2033 do {
2034 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2035 t = readl(reg);
2036 udelay(1);
2037 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2038
2039 if (GLOB_SFT_RST & t) {
2040 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2041 rc = 1;
2042 }
2043done:
2044 return rc;
2045}
2046
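/*
 * Editor's illustrative sketch (not part of the driver): the three
 * loops in mv6_reset_hc() above share one pattern -- poll a register
 * until a bit reaches the wanted state, with a bounded number of
 * retries.  A hypothetical userspace rendering, with a plain variable
 * (fake_reg) standing in for the MMIO register:
 */
#include <stdio.h>

static unsigned int fake_reg;

static int poll_bit(unsigned int mask, int want_set, int retries)
{
	while (retries-- > 0) {
		int set = (fake_reg & mask) != 0;

		if (set == want_set)
			return 0;	/* reached wanted state */
		/* the real driver udelay(1)s between reads here */
	}
	return -1;			/* timed out */
}

int main(void)
{
	fake_reg = 1 << 2;	/* pretend PCI_MASTER_EMPTY came up */
	printf("poll: %s\n", poll_bit(1 << 2, 1, 1000) ? "timeout" : "ok");
	return 0;
}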
47c2b677 2047static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2048 void __iomem *mmio)
2049{
2050 void __iomem *port_mmio;
2051 u32 tmp;
2052
ba3fe8fb
JG
2053 tmp = readl(mmio + MV_RESET_CFG);
2054 if ((tmp & (1 << 0)) == 0) {
47c2b677 2055 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2056 hpriv->signal[idx].pre = 0x1 << 5;
2057 return;
2058 }
2059
2060 port_mmio = mv_port_base(mmio, idx);
2061 tmp = readl(port_mmio + PHY_MODE2);
2062
2063 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2064 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2065}
2066
47c2b677 2067static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2068{
47c2b677 2069 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2070}
2071
c9d39130 2072static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2073 unsigned int port)
bca1c4eb 2074{
c9d39130
JG
2075 void __iomem *port_mmio = mv_port_base(mmio, port);
2076
bca1c4eb 2077 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2078 int fix_phy_mode2 =
2079 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2080 int fix_phy_mode4 =
47c2b677
JG
2081 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2082 u32 m2, tmp;
2083
2084 if (fix_phy_mode2) {
2085 m2 = readl(port_mmio + PHY_MODE2);
2086 m2 &= ~(1 << 16);
2087 m2 |= (1 << 31);
2088 writel(m2, port_mmio + PHY_MODE2);
2089
2090 udelay(200);
2091
2092 m2 = readl(port_mmio + PHY_MODE2);
2093 m2 &= ~((1 << 16) | (1 << 31));
2094 writel(m2, port_mmio + PHY_MODE2);
2095
2096 udelay(200);
2097 }
2098
2099 /* who knows what this magic does */
2100 tmp = readl(port_mmio + PHY_MODE3);
2101 tmp &= ~0x7F800000;
2102 tmp |= 0x2A800000;
2103 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2104
2105 if (fix_phy_mode4) {
47c2b677 2106 u32 m4;
bca1c4eb
JG
2107
2108 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2109
2110 if (hp_flags & MV_HP_ERRATA_60X1B2)
2111 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2112
2113 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2114
2115 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2116
2117 if (hp_flags & MV_HP_ERRATA_60X1B2)
2118 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2119 }
2120
2121 /* Revert values of pre-emphasis and signal amps to the saved ones */
2122 m2 = readl(port_mmio + PHY_MODE2);
2123
2124 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2125 m2 |= hpriv->signal[port].amps;
2126 m2 |= hpriv->signal[port].pre;
47c2b677 2127 m2 &= ~(1 << 16);
bca1c4eb 2128
e4e7b892
JG
2129 /* according to mvSata 3.6.1, some IIE values are fixed */
2130 if (IS_GEN_IIE(hpriv)) {
2131 m2 &= ~0xC30FF01F;
2132 m2 |= 0x0000900F;
2133 }
2134
bca1c4eb
JG
2135 writel(m2, port_mmio + PHY_MODE2);
2136}
2137
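/*
 * Editor's illustrative sketch (not part of the driver): restoring
 * the saved pre-emphasis/amplitude values above is a classic
 * read-modify-write of a register field.  Assumes amps lives in bits
 * 10:8 and pre in bits 7:5, per the masks used by the read_preamp
 * helpers, so MV_M2_PREAMP_MASK would cover bits 10:5 (0x7e0).
 */
#include <stdio.h>

#define PREAMP_MASK	0x7e0		/* bits 10:5 */

int main(void)
{
	unsigned int m2 = 0x12345678;	/* pretend PHY_MODE2 readout */
	unsigned int amps = 0x700;	/* saved amplitude, bits 10:8 */
	unsigned int pre = 0x20;	/* saved pre-emphasis, bits 7:5 */

	m2 &= ~PREAMP_MASK;		/* clear the field */
	m2 |= amps | pre;		/* merge saved values back in */
	printf("PHY_MODE2 -> 0x%08x\n", m2);
	return 0;
}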
c9d39130
JG
2138static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2139 unsigned int port_no)
2140{
2141 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2142
2143 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2144
ee9ccdf7 2145 if (IS_GEN_II(hpriv)) {
c9d39130 2146 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2147 ifctl |= (1 << 7); /* enable gen2i speed */
2148 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2149 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2150 }
2151
2152 udelay(25); /* allow reset propagation */
2153
2154 /* Spec never mentions clearing the bit. Marvell's driver does
2155 * clear the bit, however.
2156 */
2157 writelfl(0, port_mmio + EDMA_CMD_OFS);
2158
2159 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2160
ee9ccdf7 2161 if (IS_GEN_I(hpriv))
c9d39130
JG
2162 mdelay(1);
2163}
2164
05b308e1 2165/**
bdd4ddde 2166 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2167 * @ap: ATA channel to manipulate
2168 *
2169 * Part of this is taken from __sata_phy_reset, originally modified
2170 * not to sleep; under the new EH framework it is called from
2171 * error-handler context and may sleep (note the msleep calls below).
2172 *
2173 * LOCKING:
2174 * Inherited from caller.
31961943 2175 */
bdd4ddde
JG
2176static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2177 unsigned long deadline)
20f733e7 2178{
095fec88 2179 struct mv_port_priv *pp = ap->private_data;
cca3974e 2180 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2181 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2182 int retry = 5;
2183 u32 sstatus;
20f733e7
BR
2184
2185 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2186
da3dbb17
TH
2187#ifdef DEBUG
2188 {
2189 u32 sstatus, serror, scontrol;
2190
2191 mv_scr_read(ap, SCR_STATUS, &sstatus);
2192 mv_scr_read(ap, SCR_ERROR, &serror);
2193 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2194 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2195 "SCtrl 0x%08x\n", status, serror, scontrol);
2196 }
2197#endif
20f733e7 2198
22374677
JG
2199 /* Issue COMRESET via SControl */
2200comreset_retry:
81952c54 2201 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
bdd4ddde 2202 msleep(1);
22374677 2203
81952c54 2204 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
bdd4ddde 2205 msleep(20);
22374677 2206
31961943 2207 do {
81952c54 2208 sata_scr_read(ap, SCR_STATUS, &sstatus);
62f1d0e6 2209 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2210 break;
22374677 2211
bdd4ddde 2212 msleep(1);
c5d3e45a 2213 } while (time_before(jiffies, deadline));
20f733e7 2214
22374677 2215 /* work around errata */
ee9ccdf7 2216 if (IS_GEN_II(hpriv) &&
22374677
JG
2217 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2218 (retry-- > 0))
2219 goto comreset_retry;
095fec88 2220
da3dbb17
TH
2221#ifdef DEBUG
2222 {
2223 u32 sstatus, serror, scontrol;
2224
2225 mv_scr_read(ap, SCR_STATUS, &sstatus);
2226 mv_scr_read(ap, SCR_ERROR, &serror);
2227 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2228 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2229 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2230 }
2231#endif
31961943 2232
bdd4ddde
JG
2233 if (ata_port_offline(ap)) {
2234 *class = ATA_DEV_NONE;
20f733e7
BR
2235 return;
2236 }
2237
22374677
JG
2238 /* even after SStatus reflects that the device is ready,
2239 * it seems to take a while for the link to be fully
2240 * established (and thus for Status to stop reading 0x80/0x7F),
2241 * so we poll a bit for that here.
2242 */
2243 retry = 20;
2244 while (1) {
2245 u8 drv_stat = ata_check_status(ap);
2246 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2247 break;
bdd4ddde 2248 msleep(500);
22374677
JG
2249 if (retry-- <= 0)
2250 break;
bdd4ddde
JG
2251 if (time_after(jiffies, deadline))
2252 break;
22374677
JG
2253 }
2254
bdd4ddde
JG
2255 /* FIXME: if we passed the deadline, the following
2256 * code probably produces an invalid result
2257 */
20f733e7 2258
bdd4ddde
JG
2259 /* finally, read device signature from TF registers */
2260 *class = ata_dev_try_classify(ap, 0, NULL);
095fec88
JG
2261
2262 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2263
bdd4ddde 2264 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2265
bca1c4eb 2266 VPRINTK("EXIT\n");
20f733e7
BR
2267}
2268
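/*
 * Editor's illustrative sketch (not part of the driver): the COMRESET
 * sequence above writes 0x301 then 0x300 to SControl.  Per the SATA
 * spec, bits 3:0 are DET (0x1 = perform interface initialization,
 * 0x0 = no action) and bits 11:8 are IPM (0x3 = disallow partial and
 * slumber power states); a hypothetical decoder:
 */
#include <stdio.h>

static void decode_scontrol(unsigned int scontrol)
{
	printf("SControl 0x%03x: DET=%x SPD=%x IPM=%x\n", scontrol,
	       scontrol & 0xf,		/* device detection/init */
	       (scontrol >> 4) & 0xf,	/* allowed speed */
	       (scontrol >> 8) & 0xf);	/* power management */
}

int main(void)
{
	decode_scontrol(0x301);		/* start COMRESET */
	decode_scontrol(0x300);		/* release it */
	return 0;
}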
bdd4ddde 2269static int mv_prereset(struct ata_port *ap, unsigned long deadline)
22374677 2270{
bdd4ddde
JG
2271 struct mv_port_priv *pp = ap->private_data;
2272 struct ata_eh_context *ehc = &ap->eh_context;
2273 int rc;
0ea9e179 2274
bdd4ddde
JG
2275 rc = mv_stop_dma(ap);
2276 if (rc)
2277 ehc->i.action |= ATA_EH_HARDRESET;
2278
2279 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2280 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2281 ehc->i.action |= ATA_EH_HARDRESET;
2282 }
2283
2284 /* if we're about to do hardreset, nothing more to do */
2285 if (ehc->i.action & ATA_EH_HARDRESET)
2286 return 0;
2287
2288 if (ata_port_online(ap))
2289 rc = ata_wait_ready(ap, deadline);
2290 else
2291 rc = -ENODEV;
2292
2293 return rc;
22374677
JG
2294}
2295
bdd4ddde
JG
2296static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2297 unsigned long deadline)
31961943 2298{
bdd4ddde 2299 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2300 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2301
bdd4ddde 2302 mv_stop_dma(ap);
31961943 2303
bdd4ddde 2304 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2305
bdd4ddde
JG
2306 mv_phy_reset(ap, class, deadline);
2307
2308 return 0;
2309}
2310
2311static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2312{
2313 u32 serr;
2314
2315 /* print link status */
2316 sata_print_link_status(ap);
31961943 2317
bdd4ddde
JG
2318 /* clear SError */
2319 sata_scr_read(ap, SCR_ERROR, &serr);
2320 sata_scr_write_flush(ap, SCR_ERROR, serr);
2321
2322 /* bail out if no device is present */
2323 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2324 DPRINTK("EXIT, no device\n");
2325 return;
9b358e30 2326 }
bdd4ddde
JG
2327
2328 /* set up device control */
2329 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2330}
2331
2332static void mv_error_handler(struct ata_port *ap)
2333{
2334 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2335 mv_hardreset, mv_postreset);
2336}
2337
2338static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2339{
2340 mv_stop_dma(qc->ap);
2341}
2342
2343static void mv_eh_freeze(struct ata_port *ap)
2344{
2345 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2346 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2347 u32 tmp, mask;
2348 unsigned int shift;
2349
2350 /* FIXME: handle coalescing completion events properly */
2351
2352 shift = ap->port_no * 2;
2353 if (hc > 0)
2354 shift++;
2355
2356 mask = 0x3 << shift;
2357
2358 /* disable assertion of portN err, done events */
2359 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2360 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2361}
2362
2363static void mv_eh_thaw(struct ata_port *ap)
2364{
2365 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2366 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2367 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2368 void __iomem *port_mmio = mv_ap_base(ap);
2369 u32 tmp, mask, hc_irq_cause;
2370 unsigned int shift, hc_port_no = ap->port_no;
2371
2372 /* FIXME: handle coalescing completion events properly */
2373
2374 shift = ap->port_no * 2;
2375 if (hc > 0) {
2376 shift++;
2377 hc_port_no -= 4;
2378 }
2379
2380 mask = 0x3 << shift;
2381
2382 /* clear EDMA errors on this port */
2383 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2384
2385 /* clear pending irq events */
2386 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2387 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2388 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2389 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2390
2391 /* enable assertion of portN err, done events */
2392 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2393 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
31961943
BR
2394}
2395
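/*
 * Editor's illustrative sketch (not part of the driver): freeze and
 * thaw above are mirror images -- clear or set the same two-bit
 * err/done mask for the port in the main IRQ mask register.  A
 * hypothetical rendering of that pairing (port_irq, main_irq_mask
 * are made-up names) on a plain variable:
 */
#include <stdio.h>

static unsigned int main_irq_mask = ~0u;

static void port_irq(unsigned int port_no, int enable)
{
	unsigned int shift = port_no * 2;
	unsigned int mask;

	if (port_no > 3)
		shift++;		/* second HC skips bit 8 */
	mask = 0x3 << shift;

	if (enable)
		main_irq_mask |= mask;	/* thaw */
	else
		main_irq_mask &= ~mask;	/* freeze */
	printf("port %u %s: mask now 0x%08x\n",
	       port_no, enable ? "thaw" : "freeze", main_irq_mask);
}

int main(void)
{
	port_irq(5, 0);			/* freeze port 5 */
	port_irq(5, 1);			/* thaw it again */
	return 0;
}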
05b308e1
BR
2396/**
2397 * mv_port_init - Perform some early initialization on a single port.
2398 * @port: libata data structure storing shadow register addresses
2399 * @port_mmio: base address of the port
2400 *
2401 * Initialize shadow register mmio addresses, clear outstanding
2402 * interrupts on the port, and unmask interrupts for the future
2403 * start of the port.
2404 *
2405 * LOCKING:
2406 * Inherited from caller.
2407 */
31961943 2408static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2409{
0d5ff566 2410 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2411 unsigned serr_ofs;
2412
8b260248 2413 /* PIO related setup
31961943
BR
2414 */
2415 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2416 port->error_addr =
31961943
BR
2417 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2418 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2419 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2420 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2421 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2422 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2423 port->status_addr =
31961943
BR
2424 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2425 /* special case: control/altstatus doesn't have ATA_REG_ address */
2426 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2427
2428 /* unused: */
8d9db2d2 2429 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2430
31961943
BR
2431 /* Clear any currently outstanding port interrupt conditions */
2432 serr_ofs = mv_scr_offset(SCR_ERROR);
2433 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2434 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2435
20f733e7 2436 /* unmask all EDMA error interrupts */
31961943 2437 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2438
8b260248 2439 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2440 readl(port_mmio + EDMA_CFG_OFS),
2441 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2442 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2443}
2444
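/*
 * Editor's illustrative sketch (not part of the driver): the shadow
 * block maps each classic taskfile register to a 32-bit slot, so the
 * byte offset is the ATA_REG_ index times sizeof(u32).  Assumes the
 * usual libata numbering ATA_REG_DATA=0 through ATA_REG_STATUS=7.
 */
#include <stdio.h>

int main(void)
{
	static const char * const name[] = {
		"DATA", "ERR/FEATURE", "NSECT", "LBAL",
		"LBAM", "LBAH", "DEVICE", "STATUS/CMD",
	};
	unsigned int reg;

	for (reg = 0; reg < 8; reg++)
		printf("%-12s shd_base + 0x%02x\n", name[reg], reg * 4);
	return 0;
}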
4447d351 2445static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2446{
4447d351
TH
2447 struct pci_dev *pdev = to_pci_dev(host->dev);
2448 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2449 u32 hp_flags = hpriv->hp_flags;
2450
bca1c4eb 2451 switch (board_idx) {
47c2b677
JG
2452 case chip_5080:
2453 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2454 hp_flags |= MV_HP_GEN_I;
47c2b677 2455
44c10138 2456 switch (pdev->revision) {
47c2b677
JG
2457 case 0x1:
2458 hp_flags |= MV_HP_ERRATA_50XXB0;
2459 break;
2460 case 0x3:
2461 hp_flags |= MV_HP_ERRATA_50XXB2;
2462 break;
2463 default:
2464 dev_printk(KERN_WARNING, &pdev->dev,
2465 "Applying 50XXB2 workarounds to unknown rev\n");
2466 hp_flags |= MV_HP_ERRATA_50XXB2;
2467 break;
2468 }
2469 break;
2470
bca1c4eb
JG
2471 case chip_504x:
2472 case chip_508x:
47c2b677 2473 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2474 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2475
44c10138 2476 switch (pdev->revision) {
47c2b677
JG
2477 case 0x0:
2478 hp_flags |= MV_HP_ERRATA_50XXB0;
2479 break;
2480 case 0x3:
2481 hp_flags |= MV_HP_ERRATA_50XXB2;
2482 break;
2483 default:
2484 dev_printk(KERN_WARNING, &pdev->dev,
2485 "Applying B2 workarounds to unknown rev\n");
2486 hp_flags |= MV_HP_ERRATA_50XXB2;
2487 break;
bca1c4eb
JG
2488 }
2489 break;
2490
2491 case chip_604x:
2492 case chip_608x:
47c2b677 2493 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2494 hp_flags |= MV_HP_GEN_II;
47c2b677 2495
44c10138 2496 switch (pdev->revision) {
47c2b677
JG
2497 case 0x7:
2498 hp_flags |= MV_HP_ERRATA_60X1B2;
2499 break;
2500 case 0x9:
2501 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2502 break;
2503 default:
2504 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2505 "Applying B2 workarounds to unknown rev\n");
2506 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2507 break;
2508 }
2509 break;
2510
e4e7b892
JG
2511 case chip_7042:
2512 case chip_6042:
2513 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2514 hp_flags |= MV_HP_GEN_IIE;
2515
44c10138 2516 switch (pdev->revision) {
e4e7b892
JG
2517 case 0x0:
2518 hp_flags |= MV_HP_ERRATA_XX42A0;
2519 break;
2520 case 0x1:
2521 hp_flags |= MV_HP_ERRATA_60X1C0;
2522 break;
2523 default:
2524 dev_printk(KERN_WARNING, &pdev->dev,
2525 "Applying 60X1C0 workarounds to unknown rev\n");
2526 hp_flags |= MV_HP_ERRATA_60X1C0;
2527 break;
2528 }
2529 break;
2530
bca1c4eb
JG
2531 default:
2532 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2533 return 1;
2534 }
2535
2536 hpriv->hp_flags = hp_flags;
2537
2538 return 0;
2539}
2540
05b308e1 2541/**
47c2b677 2542 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2543 * @host: ATA host to initialize
2544 * @board_idx: controller index
05b308e1
BR
2545 *
2546 * If possible, do an early global reset of the host. Then do
2547 * our port init and clear/unmask all/relevant host interrupts.
2548 *
2549 * LOCKING:
2550 * Inherited from caller.
2551 */
4447d351 2552static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2553{
2554 int rc = 0, n_hc, port, hc;
4447d351
TH
2555 struct pci_dev *pdev = to_pci_dev(host->dev);
2556 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2557 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2558
47c2b677
JG
2559 /* global interrupt mask */
2560 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2561
4447d351 2562 rc = mv_chip_id(host, board_idx);
bca1c4eb
JG
2563 if (rc)
2564 goto done;
2565
4447d351 2566 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2567
4447d351 2568 for (port = 0; port < host->n_ports; port++)
47c2b677 2569 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2570
c9d39130 2571 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2572 if (rc)
20f733e7 2573 goto done;
20f733e7 2574
522479fb
JG
2575 hpriv->ops->reset_flash(hpriv, mmio);
2576 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2577 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2578
4447d351 2579 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2580 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2581 void __iomem *port_mmio = mv_port_base(mmio, port);
2582
2a47ce06 2583 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2584 ifctl |= (1 << 7); /* enable gen2i speed */
2585 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2586 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2587 }
2588
c9d39130 2589 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2590 }
2591
4447d351 2592 for (port = 0; port < host->n_ports; port++) {
2a47ce06 2593 void __iomem *port_mmio = mv_port_base(mmio, port);
4447d351 2594 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
20f733e7
BR
2595 }
2596
2597 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2598 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2599
2600 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2601 "(before clear)=0x%08x\n", hc,
2602 readl(hc_mmio + HC_CFG_OFS),
2603 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2604
2605 /* Clear any currently outstanding hc interrupt conditions */
2606 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2607 }
2608
31961943
BR
2609 /* Clear any currently outstanding host interrupt conditions */
2610 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2611
2612 /* and unmask interrupt generation for host regs */
2613 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
fb621e2f 2614
ee9ccdf7 2615 if (IS_GEN_I(hpriv))
fb621e2f
JG
2616 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2617 else
2618 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
2619
2620 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2621 "PCI int cause/mask=0x%08x/0x%08x\n",
20f733e7
BR
2622 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2623 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2624 readl(mmio + PCI_IRQ_CAUSE_OFS),
2625 readl(mmio + PCI_IRQ_MASK_OFS));
bca1c4eb 2626
31961943 2627done:
20f733e7
BR
2628 return rc;
2629}
2630
05b308e1
BR
2631/**
2632 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2633 * @host: ATA host to print info about
05b308e1
BR
2634 *
2635 * FIXME: complete this.
2636 *
2637 * LOCKING:
2638 * Inherited from caller.
2639 */
4447d351 2640static void mv_print_info(struct ata_host *host)
31961943 2641{
4447d351
TH
2642 struct pci_dev *pdev = to_pci_dev(host->dev);
2643 struct mv_host_priv *hpriv = host->private_data;
44c10138 2644 u8 scc;
c1e4fe71 2645 const char *scc_s, *gen;
31961943
BR
2646
2647 /* Use this to determine the HW stepping of the chip so we know
2648 * which errata to work around
2649 */
31961943
BR
2650 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2651 if (scc == 0)
2652 scc_s = "SCSI";
2653 else if (scc == 0x01)
2654 scc_s = "RAID";
2655 else
c1e4fe71
JG
2656 scc_s = "?";
2657
2658 if (IS_GEN_I(hpriv))
2659 gen = "I";
2660 else if (IS_GEN_II(hpriv))
2661 gen = "II";
2662 else if (IS_GEN_IIE(hpriv))
2663 gen = "IIE";
2664 else
2665 gen = "?";
31961943 2666
a9524a76 2667 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2668 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2669 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2670 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2671}
2672
05b308e1
BR
2673/**
2674 * mv_init_one - handle a positive probe of a Marvell host
2675 * @pdev: PCI device found
2676 * @ent: PCI device ID entry for the matched host
2677 *
2678 * LOCKING:
2679 * Inherited from caller.
2680 */
20f733e7
BR
2681static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2682{
2683 static int printed_version = 0;
20f733e7 2684 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
2685 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2686 struct ata_host *host;
2687 struct mv_host_priv *hpriv;
2688 int n_ports, rc;
20f733e7 2689
a9524a76
JG
2690 if (!printed_version++)
2691 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2692
4447d351
TH
2693 /* allocate host */
2694 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2695
2696 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2697 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2698 if (!host || !hpriv)
2699 return -ENOMEM;
2700 host->private_data = hpriv;
2701
2702 /* acquire resources */
24dc5f33
TH
2703 rc = pcim_enable_device(pdev);
2704 if (rc)
20f733e7 2705 return rc;
20f733e7 2706
0d5ff566
TH
2707 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2708 if (rc == -EBUSY)
24dc5f33 2709 pcim_pin_device(pdev);
0d5ff566 2710 if (rc)
24dc5f33 2711 return rc;
4447d351 2712 host->iomap = pcim_iomap_table(pdev);
20f733e7 2713
d88184fb
JG
2714 rc = pci_go_64(pdev);
2715 if (rc)
2716 return rc;
2717
20f733e7 2718 /* initialize adapter */
4447d351 2719 rc = mv_init_host(host, board_idx);
24dc5f33
TH
2720 if (rc)
2721 return rc;
20f733e7 2722
31961943 2723 /* Enable interrupts */
6a59dcf8 2724 if (msi && pci_enable_msi(pdev))
31961943 2725 pci_intx(pdev, 1);
20f733e7 2726
31961943 2727 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2728 mv_print_info(host);
20f733e7 2729
4447d351 2730 pci_set_master(pdev);
ea8b4db9 2731 pci_try_set_mwi(pdev);
4447d351 2732 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2733 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7
BR
2734}
2735
2736static int __init mv_init(void)
2737{
b7887196 2738 return pci_register_driver(&mv_pci_driver);
20f733e7
BR
2739}
2740
2741static void __exit mv_exit(void)
2742{
2743 pci_unregister_driver(&mv_pci_driver);
2744}
2745
2746MODULE_AUTHOR("Brett Russ");
2747MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2748MODULE_LICENSE("GPL");
2749MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2750MODULE_VERSION(DRV_VERSION);
2751
ddef9bb3
JG
2752module_param(msi, int, 0444);
2753MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2754
20f733e7
BR
2755module_init(mv_init);
2756module_exit(mv_exit);