/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
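
	/* Added note: with MV_MAX_Q_DEPTH == 32 the sizes above work out
	 * to 1024 B of CRQBs + 256 B of CRPBs + 2816 B of ePRDs (16 * 176),
	 * so MV_PORT_PRIV_DMA_SZ totals exactly 4096 B -- one 4 KB page of
	 * coherent DMA memory per port, as the comment above states.
	 */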

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

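/* Added commentary on the helper below: it walks a fallback ladder --
 * a 64-bit streaming DMA mask first, then a 64-bit coherent (consistent)
 * mask, dropping to a 32-bit coherent mask if that fails; if the 64-bit
 * streaming mask itself is refused, both masks are set to 32 bits.
 * Any failure is logged and returned to the probe path.
 */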
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
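
/* Added note: the readl() above forces the preceding posted PCI write
 * out to the device before the caller proceeds.  mv_start_dma(), for
 * example, relies on this so the EDMA error-cause clear has landed
 * before the queue pointers and EDMA_EN are written.
 */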

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
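
/* Worked example (added): on an 8-port, dual-HC chip, port 6 lives on
 * HC 1 (6 >> MV_PORT_HC_SHIFT == 1) as hard port 2 (6 & MV_PORT_MASK
 * == 2); ports 0-3 belong to HC 0 and ports 4-7 to HC 1.
 */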

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
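
/* Layout note (added): the IN/OUT pointer registers double as BASE_LO.
 * The 1 KB aligned CRQB ring keeps its base in bits [31:10] and its
 * 5-bit slot index in bits [9:5] (EDMA_REQ_Q_PTR_SHIFT == 5); the
 * 256 B aligned CRPB ring keeps its index in bits [7:3]
 * (EDMA_RSP_Q_PTR_SHIFT == 3).  E.g. req_idx == 3 is written out as
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | (3 << 5).
 */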

/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@hpriv: host private data
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *	__mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
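
/* Added for reference: with libata's SCR numbering (SCR_STATUS == 0,
 * SCR_ERROR == 1, SCR_CONTROL == 2), the arithmetic above maps
 * SCR_STATUS -> 0x300 (SStatus), SCR_ERROR -> 0x304 (SError),
 * SCR_CONTROL -> 0x308 (SControl), and SCR_ACTIVE -> 0x350 (SActive).
 */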

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
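
/* Worked example (added): a segment at bus address 0x1234fff0 with
 * length 0x30 crosses a 64 KB line and is emitted as two ePRDs,
 * 0x1234fff0/0x10 and 0x12350000/0x20.  A piece of exactly 64 KB
 * encodes its length field as 0 (len & 0xffff), which the EDMA
 * presumably interprets as 65536 bytes, as classic IDE PRD tables do.
 */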

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
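
/* Bit layout of each packed command word (added for reference):
 * bits [7:0] hold the register value, bits [10:8] the ATA_REG_*
 * shadow-register index (values 0-7), bits [12:11] the CRQB_CMD_CS
 * field (0x2, per the enum above), and bit 15 CRQB_CMD_LAST on the
 * final word.  E.g. mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1)
 * in mv_qc_prep() emits the command-register write and terminates the
 * sequence.
 */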

/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there is room
	 * for only 11 16-bit command words...so we must pick and choose
	 * required registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
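
/* Added note: pp->req_idx is the software producer index, while the
 * EDMA exports its consumer index through EDMA_REQ_Q_OUT_PTR.  Issuing
 * the first command bumps req_idx from 0 to 1 and writes
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | (1 << EDMA_REQ_Q_PTR_SHIFT),
 * telling the engine that exactly one new CRQB is ready.
 */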

/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@qc: affected queued command, or NULL if none is active
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
bdd4ddde
JG
1649static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1650{
1651 struct ata_port *ap;
1652 struct ata_queued_cmd *qc;
1653 struct ata_eh_info *ehi;
1654 unsigned int i, err_mask, printed = 0;
1655 u32 err_cause;
1656
1657 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1658
1659 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1660 err_cause);
1661
1662 DPRINTK("All regs @ PCI error\n");
1663 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1664
1665 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1666
1667 for (i = 0; i < host->n_ports; i++) {
1668 ap = host->ports[i];
936fd732 1669 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1670 ehi = &ap->link.eh_info;
bdd4ddde
JG
1671 ata_ehi_clear_desc(ehi);
1672 if (!printed++)
1673 ata_ehi_push_desc(ehi,
1674 "PCI err cause 0x%08x", err_cause);
1675 err_mask = AC_ERR_HOST_BUS;
1676 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1677 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1678 if (qc)
1679 qc->err_mask |= err_mask;
1680 else
1681 ehi->err_mask |= err_mask;
1682
1683 ata_port_freeze(ap);
1684 }
1685 }
1686}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read-only main cause register to determine if any host
 *      controllers have pending interrupts.  If so, call the lower
 *      level routine to handle them.  Also check for PCI errors, which
 *      are reported only here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
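
/*
 * Editor's sketch (hypothetical helper, not driver code): the check at
 * the top of mv_interrupt() folds two cases into one test.  A PCI
 * device that has been hot-removed or has faulted returns all-ones on
 * MMIO reads, so both 0 ("nothing pending") and 0xffffffff ("bogus
 * read") mean the IRQ was not ours.
 */
static inline int mv_main_cause_is_valid(u32 irq_stat)
{
	return irq_stat != 0 && irq_stat != 0xffffffffU;
}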

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
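
/*
 * Editor's note with a worked example: mv_hardport_from_port() is the
 * port's index within its HC (0..3), and the "+ 1" places hardport 0
 * at 0x100 past the HC base.  So port 5 -> hardport 1 ->
 * ofs = 2 * 0x100 = 0x200 past the base of the HC that hosts it.
 */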

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused here)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: resulting device class, read from the TF signature
 *      @deadline: deadline jiffies for the operation
 *
 *      Part of this was originally taken from __sata_phy_reset.  Note
 *      that the routine now msleep()s while polling, so it may sleep
 *      and must be called from a context that allows it (libata EH),
 *      not from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
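
/*
 * Editor's sketch of the COMRESET sequence above (hypothetical helper,
 * not driver code).  The magic SControl values drive the DET field
 * (bits 3:0): 0x301 asserts COMRESET (DET=1) and 0x300 releases it
 * (DET=0), both with IPM (bits 11:8) = 0x3 to disable the partial and
 * slumber power states.
 */
static inline void mv_comreset_sketch(struct ata_link *link)
{
	sata_scr_write_flush(link, SCR_CONTROL, 0x301);	/* assert */
	msleep(1);
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);	/* release */
	msleep(20);					/* let the PHY settle */
}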

static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
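
/*
 * Editor's sketch (hypothetical helper): freeze/thaw above compute the
 * same err/done bit pair in the main IRQ mask register that
 * mv_host_intr() decodes — two bits per port, shifted by one extra bit
 * for ports on HC1.  Factored out, it would look like:
 */
static inline u32 mv_port_main_irq_mask(unsigned int port_no)
{
	unsigned int shift = port_no * 2;

	if (port_no > 3)		/* port lives on HC1 */
		shift++;
	return 0x3 << shift;		/* err + done bits */
}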

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
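
/*
 * Editor's sketch (hypothetical, not driver code): each taskfile shadow
 * register occupies a u32 slot in the shadow block, so once
 * mv_port_init() has run, PIO code can reach the device through the
 * usual mmio accessors, e.g.:
 */
static inline u8 mv_sketch_read_status(struct ata_ioports *port)
{
	return ioread8(port->status_addr);	/* ATA Status register */
}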

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
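
/*
 * Editor's summary of the dispatch above (informational comment only):
 *
 *   board idx        ops          gen flag       rev -> errata flag
 *   chip_5080        mv5xxx_ops   MV_HP_GEN_I    0x1 -> 50XXB0, 0x3 -> 50XXB2
 *   chip_504x/508x   mv5xxx_ops   MV_HP_GEN_I    0x0 -> 50XXB0, 0x3 -> 50XXB2
 *   chip_604x/608x   mv6xxx_ops   MV_HP_GEN_II   0x7 -> 60X1B2, 0x9 -> 60X1C0
 *   chip_6042/7042   mv6xxx_ops   MV_HP_GEN_IIE  0x0 -> XX42A0, 0x1 -> 60X1C0
 *
 * Unknown revisions fall back to the default errata set for that family.
 */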

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI device class to report whether the part is
	 * operating in SCSI or RAID mode
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
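
/*
 * Editor's usage note: "modprobe sata_mv msi=1" requests MSI at load
 * time; mv_init_one() falls back to legacy INTx via pci_intx() when
 * pci_enable_msi() fails.
 */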

module_init(mv_init);
module_exit(mv_exit);