/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
     of the errata workarounds found in the Marvell vendor driver, but
     I distinctly remember a couple of workarounds (one related to PCI-X)
     are still needed.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
     probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
     "unknown FIS" or "vendor-specific FIS" support, or something creative
     like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
      Quite often, especially with PCI Message Signalled Interrupts (MSI),
      the overhead reduced by interrupt mitigation is not worth the
      latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
      mode to cross-connect two Linux boxes with Marvell cards?  If so,
      creating LibATA target mode support would be very interesting.

      Target mode, for those without docs, is the ability to directly
      connect two SATA controllers.

  12) Verify that the 7042 is fully supported.  I only have a 6042.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR	= 0,	/* offset 0x10: memory space */
	MV_IO_BAR	= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR	= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 * (An illustrative size check of these values follows this enum.)
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
};
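
/*
 * Illustrative compile-time check of the DMA-area arithmetic above.
 * This helper is NOT part of the original driver -- the name
 * mv_check_dma_layout() is hypothetical, added only to make the size
 * comment in the enum concrete:
 *
 *	CRQB ring:   32 slots  * 32 B = 1024 B (1 KB, 1 KB-aligned)
 *	CRPB ring:   32 slots  *  8 B =  256 B (256 B-aligned)
 *	ePRD table: 176 entries * 16 B = 2816 B (16 B-aligned)
 *	total:      1024 + 256 + 2816 = 4096 B = MV_PORT_PRIV_DMA_SZ
 */
static inline void mv_check_dma_layout(void)
{
	BUILD_BUG_ON(MV_CRQB_Q_SZ != 1024);
	BUILD_BUG_ON(MV_CRPB_Q_SZ != 256);
	BUILD_BUG_ON(MV_SG_TBL_SZ != 2816);
	BUILD_BUG_ON(MV_PORT_PRIV_DMA_SZ != 4096);
}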

#define IS_50XX(hpriv)		((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)		(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
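
/*
 * For context: a pci_driver like the one above is normally registered
 * from module init.  The sketch below is illustrative only -- the
 * driver's real init/exit routines are not part of this excerpt, and
 * they would also handle the "msi" module option declared further down:
 *
 *	static int __init mv_init(void)
 *	{
 *		return pci_register_driver(&mv_pci_driver);
 *	}
 *
 *	static void __exit mv_exit(void)
 *	{
 *		pci_unregister_driver(&mv_pci_driver);
 *	}
 *
 *	module_init(mv_init);
 *	module_exit(mv_exit);
 */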

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
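
/*
 * Note on the fallback ladder in pci_go_64() above: pci_set_dma_mask()
 * returns 0 on success, so the first branch runs only when the platform
 * accepts a 64-bit streaming mask; otherwise both the streaming and the
 * consistent masks drop to 32 bits.  A caller (sketch only; the actual
 * probe routine is not part of this excerpt) would use it as:
 *
 *	rc = pci_go_64(pdev);
 *	if (rc)
 *		return rc;	// neither 64-bit nor 32-bit DMA is usable
 */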

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
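
/*
 * Why writelfl() does a dummy read: PCI memory writes are "posted"
 * (buffered by host/PCI bridges), so a bare writel() can return before
 * the device has actually seen the value.  A read from the same device
 * forces all posted writes ahead of it to complete.  Typical use, as
 * seen throughout this driver:
 *
 *	writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 *	// on return, the device has observed EDMA_EN
 */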

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
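
/*
 * Worked example of the address arithmetic above, using the constants
 * defined earlier in this file: for system port 5 on a dual-HC
 * (8-port) chip,
 *
 *	hc        = 5 >> MV_PORT_HC_SHIFT      = 1
 *	hardport  = 5 & MV_PORT_MASK           = 1
 *	hc base   = 0x20000 + 1 * 0x10000      = 0x30000
 *	port base = 0x30000 + 0x2000 (arbiter)
 *	            + 1 * 0x2000               = 0x34000
 */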

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	/*
	 * initialize request queue
	 */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
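
/*
 * How the IN/OUT pointer registers are laid out, as implied by the
 * masks and shifts above (an informal sketch, not a register spec):
 * the CRQB ring is 1 KB aligned, so its low address bits are zero and
 * one 32-bit register can carry both the ring base and the current
 * index.  With EDMA_REQ_Q_PTR_SHIFT == 5 (a CRQB is 32 B == 1 << 5):
 *
 *	u32 in_ptr  = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 *	u32 base_lo = in_ptr & EDMA_REQ_Q_BASE_LO_MASK;	// bits 31:10
 *	u32 index   = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT)
 *			& MV_MAX_Q_DEPTH_MASK;		// bits 9:5
 *	// index * 32 B is the byte offset of the slot in the 1 KB ring
 */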

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg))
			break;
		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
		err = -EIO;
	}

	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4-port devices */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	} else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	} else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
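
/*
 * Aside on the "(addr >> 16) >> 16" idiom used in mv_fill_sg() and
 * elsewhere: dma_addr_t is only 32 bits wide on many configurations,
 * and shifting a 32-bit value by 32 is undefined behaviour in C.  Two
 * 16-bit shifts are well defined for both widths:
 *
 *	dma_addr_t addr = sg_dma_address(sg);
 *	u32 lo = addr & 0xffffffff;	// low 32 bits
 *	u32 hi = (addr >> 16) >> 16;	// high 32 bits; 0 when dma_addr_t
 *					// is itself only 32 bits wide
 */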

static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
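
/*
 * Example of the wrap-around above (MV_MAX_Q_DEPTH_MASK == 31):
 * mv_inc_q_index(5) == 6, while mv_inc_q_index(31) == (32 & 31) == 0,
 * so the index walks the 32-slot ring and wraps back to slot 0.
 */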

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
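
/*
 * Layout of one packed CRQB command word, as implied by the shifts
 * used above (informal sketch, not taken from a datasheet):
 *
 *	bits  7:0	ATA register value (data)
 *	bits 10:8	ATA register address (addr << CRQB_CMD_ADDR_SHIFT)
 *	bit  12		CRQB_CMD_CS (0x2 << 11)
 *	bit  15		CRQB_CMD_LAST, set on the final word of a command
 *
 * e.g. packing the command register as the last word yields roughly
 * tf->command | (ATA_REG_CMD << 8) | CRQB_CMD_CS | CRQB_CMD_LAST.
 */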

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, hpriv, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger a reset from here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int port, port0;
	int shift, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
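
/*
 * Worked example of the "shift" computation in mv_host_intr(), based on
 * the HC_MAIN_IRQ_CAUSE bit definitions earlier in this file: each port
 * owns an ERR/DONE bit pair at bits (2*port, 2*port + 1), and ports on
 * HC1 additionally skip bit 8 (HC0's coalescing-done bit):
 *
 *	port 2 (HC0): shift = 4;  PORT0_ERR << 4  = bit 4,  DONE = bit 5
 *	port 5 (HC1): shift = 11; PORT0_ERR << 11 = bit 11, DONE = bit 12
 *
 * This is consistent with HC0_IRQ_PEND == 0x1ff (bits 0-8) and
 * HC_SHIFT == 9 (HC1's ports start at bit 9).
 */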
1508
05b308e1 1509/**
c5d3e45a 1510 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1511 * @irq: unused
1512 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1513 *
1514 * Read the read only register to determine if any host
1515 * controllers have pending interrupts. If so, call lower level
1516 * routine to handle. Also check for PCI errors which are only
1517 * reported here.
1518 *
8b260248 1519 * LOCKING:
cca3974e 1520 * This routine holds the host lock while processing pending
05b308e1
BR
1521 * interrupts.
1522 */
7d12e780 1523static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1524{
cca3974e 1525 struct ata_host *host = dev_instance;
20f733e7 1526 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1527 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
615ab953 1528 struct mv_host_priv *hpriv;
20f733e7
BR
1529 u32 irq_stat;
1530
20f733e7 1531 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
20f733e7
BR
1532
1533 /* check the cases where we either have nothing pending or have read
1534 * a bogus register value which can indicate HW removal or PCI fault
1535 */
35177265 1536 if (!irq_stat || (0xffffffffU == irq_stat))
20f733e7 1537 return IRQ_NONE;
20f733e7 1538
cca3974e
JG
1539 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1540 spin_lock(&host->lock);
20f733e7
BR
1541
1542 for (hc = 0; hc < n_hcs; hc++) {
1543 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1544 if (relevant) {
cca3974e 1545 mv_host_intr(host, relevant, hc);
31961943 1546 handled++;
20f733e7
BR
1547 }
1548 }
615ab953 1549
cca3974e 1550 hpriv = host->private_data;
615ab953
ML
1551 if (IS_60XX(hpriv)) {
1552 /* deal with the interrupt coalescing bits */
1553 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1554 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1555 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1556 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1557 }
1558 }
1559
20f733e7 1560 if (PCI_ERR & irq_stat) {
31961943
BR
1561 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1562 readl(mmio + PCI_IRQ_CAUSE_OFS));
1563
afb0edd9 1564 DPRINTK("All regs @ PCI error\n");
cca3974e 1565 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
20f733e7 1566
31961943
BR
1567 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1568 handled++;
1569 }
cca3974e 1570 spin_unlock(&host->lock);
20f733e7
BR
1571
1572 return IRQ_RETVAL(handled);
1573}
1574
c9d39130
JG
1575static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1576{
1577 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1578 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1579
1580 return hc_mmio + ofs;
1581}
1582
1583static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1584{
1585 unsigned int ofs;
1586
1587 switch (sc_reg_in) {
1588 case SCR_STATUS:
1589 case SCR_ERROR:
1590 case SCR_CONTROL:
1591 ofs = sc_reg_in * sizeof(u32);
1592 break;
1593 default:
1594 ofs = 0xffffffffU;
1595 break;
1596 }
1597 return ofs;
1598}
1599
1600static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1601{
0d5ff566
TH
1602 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1603 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1604 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1605
1606 if (ofs != 0xffffffffU)
0d5ff566 1607 return readl(addr + ofs);
c9d39130
JG
1608 else
1609 return (u32) ofs;
1610}
1611
1612static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1613{
0d5ff566
TH
1614 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1615 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1616 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1617
1618 if (ofs != 0xffffffffU)
0d5ff566 1619 writelfl(val, addr + ofs);
c9d39130
JG
1620}
1621
522479fb
JG
1622static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1623{
1624 u8 rev_id;
1625 int early_5080;
1626
1627 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1628
1629 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1630
1631 if (!early_5080) {
1632 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1633 tmp |= (1 << 0);
1634 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1635 }
1636
1637 mv_reset_pci_bus(pdev, mmio);
1638}
1639
1640static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1641{
1642 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1643}
1644
47c2b677 1645static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1646 void __iomem *mmio)
1647{
c9d39130
JG
1648 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1649 u32 tmp;
1650
1651 tmp = readl(phy_mmio + MV5_PHY_MODE);
1652
1653 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1654 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1655}
1656
47c2b677 1657static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1658{
522479fb
JG
1659 u32 tmp;
1660
1661 writel(0, mmio + MV_GPIO_PORT_CTL);
1662
1663 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1664
1665 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1666 tmp |= ~(1 << 0);
1667 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1668}
1669
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (bits 12:11) and amplitude (bits 7:5) fields */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore the pre-emphasis and amplitude values saved at probe time */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

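/* The ZERO() helper above is just shorthand: ZERO(0x028) expands to
 * writel(0, port_mmio + 0x028). It is redefined per function so the same
 * idiom can zero offsets relative to a port base here, an HC base in
 * mv5_reset_one_hc(), and the chip base in mv_reset_pci_bus().
 */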
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

1795/**
1796 * mv6_reset_hc - Perform the 6xxx global soft reset
1797 * @mmio: base address of the HBA
1798 *
1799 * This routine only applies to 6xxx parts.
1800 *
1801 * LOCKING:
1802 * Inherited from caller.
1803 */
c9d39130
JG
1804static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1805 unsigned int n_hc)
101ffae2
JG
1806{
1807 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1808 int i, rc = 0;
1809 u32 t;
1810
1811 /* Following procedure defined in PCI "main command and status
1812 * register" table.
1813 */
1814 t = readl(reg);
1815 writel(t | STOP_PCI_MASTER, reg);
1816
1817 for (i = 0; i < 1000; i++) {
1818 udelay(1);
1819 t = readl(reg);
1820 if (PCI_MASTER_EMPTY & t) {
1821 break;
1822 }
1823 }
1824 if (!(PCI_MASTER_EMPTY & t)) {
1825 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1826 rc = 1;
1827 goto done;
1828 }
1829
1830 /* set reset */
1831 i = 5;
1832 do {
1833 writel(t | GLOB_SFT_RST, reg);
1834 t = readl(reg);
1835 udelay(1);
1836 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
1837
1838 if (!(GLOB_SFT_RST & t)) {
1839 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1840 rc = 1;
1841 goto done;
1842 }
1843
1844 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1845 i = 5;
1846 do {
1847 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1848 t = readl(reg);
1849 udelay(1);
1850 } while ((GLOB_SFT_RST & t) && (i-- > 0));
1851
1852 if (GLOB_SFT_RST & t) {
1853 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1854 rc = 1;
1855 }
1856done:
1857 return rc;
1858}
1859
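/* The sequence above, in short: stop new PCI master transactions, poll
 * until the master queue drains, assert the global soft reset bit, then
 * deassert it while also re-enabling the PCI master -- that last step is
 * driver lore rather than anything the datasheet spells out.
 */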
47c2b677 1860static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1861 void __iomem *mmio)
1862{
1863 void __iomem *port_mmio;
1864 u32 tmp;
1865
ba3fe8fb
JG
1866 tmp = readl(mmio + MV_RESET_CFG);
1867 if ((tmp & (1 << 0)) == 0) {
47c2b677 1868 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
1869 hpriv->signal[idx].pre = 0x1 << 5;
1870 return;
1871 }
1872
1873 port_mmio = mv_port_base(mmio, idx);
1874 tmp = readl(port_mmio + PHY_MODE2);
1875
1876 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
1877 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
1878}
1879
47c2b677 1880static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1881{
47c2b677 1882 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
1883}
1884
c9d39130 1885static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 1886 unsigned int port)
bca1c4eb 1887{
c9d39130
JG
1888 void __iomem *port_mmio = mv_port_base(mmio, port);
1889
bca1c4eb 1890 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
1891 int fix_phy_mode2 =
1892 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 1893 int fix_phy_mode4 =
47c2b677
JG
1894 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1895 u32 m2, tmp;
1896
1897 if (fix_phy_mode2) {
1898 m2 = readl(port_mmio + PHY_MODE2);
1899 m2 &= ~(1 << 16);
1900 m2 |= (1 << 31);
1901 writel(m2, port_mmio + PHY_MODE2);
1902
1903 udelay(200);
1904
1905 m2 = readl(port_mmio + PHY_MODE2);
1906 m2 &= ~((1 << 16) | (1 << 31));
1907 writel(m2, port_mmio + PHY_MODE2);
1908
1909 udelay(200);
1910 }
1911
1912 /* who knows what this magic does */
1913 tmp = readl(port_mmio + PHY_MODE3);
1914 tmp &= ~0x7F800000;
1915 tmp |= 0x2A800000;
1916 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
1917
1918 if (fix_phy_mode4) {
47c2b677 1919 u32 m4;
bca1c4eb
JG
1920
1921 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
1922
1923 if (hp_flags & MV_HP_ERRATA_60X1B2)
1924 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
1925
1926 m4 = (m4 & ~(1 << 1)) | (1 << 0);
1927
1928 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
1929
1930 if (hp_flags & MV_HP_ERRATA_60X1B2)
1931 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
1932 }
1933
1934 /* Revert values of pre-emphasis and signal amps to the saved ones */
1935 m2 = readl(port_mmio + PHY_MODE2);
1936
1937 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
1938 m2 |= hpriv->signal[port].amps;
1939 m2 |= hpriv->signal[port].pre;
47c2b677 1940 m2 &= ~(1 << 16);
bca1c4eb 1941
e4e7b892
JG
1942 /* according to mvSata 3.6.1, some IIE values are fixed */
1943 if (IS_GEN_IIE(hpriv)) {
1944 m2 &= ~0xC30FF01F;
1945 m2 |= 0x0000900F;
1946 }
1947
bca1c4eb
JG
1948 writel(m2, port_mmio + PHY_MODE2);
1949}
1950
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit. Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}

static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}

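/* Callers that may run in atomic context (e.g. mv_stop_and_reset() under
 * the host lock in mv_eng_timeout()) pass can_sleep == 0 and take the
 * busy-wait mdelay() path; mv_phy_reset(), called from process context,
 * passes 1 and sleeps instead.
 */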
05b308e1 1998/**
22374677 1999 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2000 * @ap: ATA channel to manipulate
2001 *
2002 * Part of this is taken from __sata_phy_reset and modified to
2003 * not sleep since this routine gets called from interrupt level.
2004 *
2005 * LOCKING:
2006 * Inherited from caller. This is coded to safe to call at
2007 * interrupt level, i.e. it does not sleep.
31961943 2008 */
22374677 2009static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
20f733e7 2010{
095fec88 2011 struct mv_port_priv *pp = ap->private_data;
cca3974e 2012 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7
BR
2013 void __iomem *port_mmio = mv_ap_base(ap);
2014 struct ata_taskfile tf;
2015 struct ata_device *dev = &ap->device[0];
c5d3e45a 2016 unsigned long deadline;
22374677
JG
2017 int retry = 5;
2018 u32 sstatus;
20f733e7
BR
2019
2020 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2021
095fec88 2022 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
31961943
BR
2023 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2024 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
20f733e7 2025
22374677
JG
2026 /* Issue COMRESET via SControl */
2027comreset_retry:
81952c54 2028 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
22374677
JG
2029 __msleep(1, can_sleep);
2030
81952c54 2031 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
22374677
JG
2032 __msleep(20, can_sleep);
2033
c5d3e45a 2034 deadline = jiffies + msecs_to_jiffies(200);
31961943 2035 do {
81952c54 2036 sata_scr_read(ap, SCR_STATUS, &sstatus);
62f1d0e6 2037 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2038 break;
22374677
JG
2039
2040 __msleep(1, can_sleep);
c5d3e45a 2041 } while (time_before(jiffies, deadline));
20f733e7 2042
22374677
JG
2043 /* work around errata */
2044 if (IS_60XX(hpriv) &&
2045 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2046 (retry-- > 0))
2047 goto comreset_retry;
095fec88
JG
2048
2049 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
31961943
BR
2050 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2051 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2052
81952c54 2053 if (ata_port_online(ap)) {
31961943
BR
2054 ata_port_probe(ap);
2055 } else {
81952c54 2056 sata_scr_read(ap, SCR_STATUS, &sstatus);
f15a1daf
TH
2057 ata_port_printk(ap, KERN_INFO,
2058 "no device found (phy stat %08x)\n", sstatus);
31961943 2059 ata_port_disable(ap);
20f733e7
BR
2060 return;
2061 }
2062
22374677
JG
2063 /* even after SStatus reflects that device is ready,
2064 * it seems to take a while for link to be fully
2065 * established (and thus Status no longer 0x80/0x7F),
2066 * so we poll a bit for that, here.
2067 */
2068 retry = 20;
2069 while (1) {
2070 u8 drv_stat = ata_check_status(ap);
2071 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2072 break;
2073 __msleep(500, can_sleep);
2074 if (retry-- <= 0)
2075 break;
2076 }
2077
0d5ff566
TH
2078 tf.lbah = readb(ap->ioaddr.lbah_addr);
2079 tf.lbam = readb(ap->ioaddr.lbam_addr);
2080 tf.lbal = readb(ap->ioaddr.lbal_addr);
2081 tf.nsect = readb(ap->ioaddr.nsect_addr);
20f733e7
BR
2082
2083 dev->class = ata_dev_classify(&tf);
e1211e3f 2084 if (!ata_dev_enabled(dev)) {
20f733e7
BR
2085 VPRINTK("Port disabled post-sig: No device present.\n");
2086 ata_port_disable(ap);
2087 }
095fec88
JG
2088
2089 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2090
2091 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2092
bca1c4eb 2093 VPRINTK("EXIT\n");
20f733e7
BR
2094}
2095
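/* In outline, __mv_phy_reset() is: COMRESET via SControl, poll SStatus.DET
 * until the link reports device-present (or times out), poll the shadow
 * Status register until the device drops 0x80/0x7F, then read the signature
 * taskfile registers to classify the attached device.
 */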
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the shadow registers are 32-bit spaced,
	 * hence the sizeof(u32) stride below.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

4447d351 2186static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2187{
4447d351
TH
2188 struct pci_dev *pdev = to_pci_dev(host->dev);
2189 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2190 u8 rev_id;
2191 u32 hp_flags = hpriv->hp_flags;
2192
2193 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2194
2195 switch(board_idx) {
47c2b677
JG
2196 case chip_5080:
2197 hpriv->ops = &mv5xxx_ops;
2198 hp_flags |= MV_HP_50XX;
2199
2200 switch (rev_id) {
2201 case 0x1:
2202 hp_flags |= MV_HP_ERRATA_50XXB0;
2203 break;
2204 case 0x3:
2205 hp_flags |= MV_HP_ERRATA_50XXB2;
2206 break;
2207 default:
2208 dev_printk(KERN_WARNING, &pdev->dev,
2209 "Applying 50XXB2 workarounds to unknown rev\n");
2210 hp_flags |= MV_HP_ERRATA_50XXB2;
2211 break;
2212 }
2213 break;
2214
bca1c4eb
JG
2215 case chip_504x:
2216 case chip_508x:
47c2b677 2217 hpriv->ops = &mv5xxx_ops;
bca1c4eb
JG
2218 hp_flags |= MV_HP_50XX;
2219
47c2b677
JG
2220 switch (rev_id) {
2221 case 0x0:
2222 hp_flags |= MV_HP_ERRATA_50XXB0;
2223 break;
2224 case 0x3:
2225 hp_flags |= MV_HP_ERRATA_50XXB2;
2226 break;
2227 default:
2228 dev_printk(KERN_WARNING, &pdev->dev,
2229 "Applying B2 workarounds to unknown rev\n");
2230 hp_flags |= MV_HP_ERRATA_50XXB2;
2231 break;
bca1c4eb
JG
2232 }
2233 break;
2234
2235 case chip_604x:
2236 case chip_608x:
47c2b677
JG
2237 hpriv->ops = &mv6xxx_ops;
2238
bca1c4eb 2239 switch (rev_id) {
47c2b677
JG
2240 case 0x7:
2241 hp_flags |= MV_HP_ERRATA_60X1B2;
2242 break;
2243 case 0x9:
2244 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2245 break;
2246 default:
2247 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2248 "Applying B2 workarounds to unknown rev\n");
2249 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2250 break;
2251 }
2252 break;
2253
e4e7b892
JG
2254 case chip_7042:
2255 case chip_6042:
2256 hpriv->ops = &mv6xxx_ops;
2257
2258 hp_flags |= MV_HP_GEN_IIE;
2259
2260 switch (rev_id) {
2261 case 0x0:
2262 hp_flags |= MV_HP_ERRATA_XX42A0;
2263 break;
2264 case 0x1:
2265 hp_flags |= MV_HP_ERRATA_60X1C0;
2266 break;
2267 default:
2268 dev_printk(KERN_WARNING, &pdev->dev,
2269 "Applying 60X1C0 workarounds to unknown rev\n");
2270 hp_flags |= MV_HP_ERRATA_60X1C0;
2271 break;
2272 }
2273 break;
2274
bca1c4eb
JG
2275 default:
2276 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2277 return 1;
2278 }
2279
2280 hpriv->hp_flags = hp_flags;
2281
2282 return 0;
2283}
2284
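/* Summary of the board mapping above: all 50xx parts (Gen I) use
 * mv5xxx_ops plus MV_HP_50XX; the 604x/608x parts (Gen II) and the
 * 6042/7042 parts (Gen IIE, which additionally set MV_HP_GEN_IIE) share
 * mv6xxx_ops. Unknown revisions get the newest errata workarounds, on
 * the assumption that they are later steppings.
 */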
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host. Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id, scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * which errata to work around
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI was requested
	 * but could not be enabled.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);