/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.0"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
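	/* i.e. (32 * 32) + (8 * 32) + (16 * 176)
	 *    = 1024 + 256 + 2816 = 4096 bytes, the 4KB noted above.
	 */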

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
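/* Prefer a fully 64-bit DMA mask below; fall back to 32-bit streaming
 * and consistent masks whenever the device or platform refuses.
 */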
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
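/* e.g. port 5 resolves to 0x20000 + (1 * 0x10000) + 0x2000 + (1 * 0x2000)
 * = 0x34000, i.e. hard port 1 of host controller 1.
 */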

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
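	/* (x >> 16) >> 16 extracts the high 32 bits without shifting a
	 * possibly 32-bit dma_addr_t by 32, which would be undefined.
	 */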
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
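	/* (1000 iterations of 100us each: roughly a 100ms timeout) */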
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

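		/* The ePRD length field is 16 bits wide, so split any
		 * segment crossing a 64KB boundary (see the
		 * MV_DMA_BOUNDARY comment above).
		 */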
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
			n_sg++;
		}

	}

	return n_sg;
}

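/* Pack one shadow-register write (data byte plus register address) into
 * a 16-bit CRQB command word; 'last' flags the final word of the CRQB.
 */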
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, or NULL if none
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
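		/* e.g. port 5: shift becomes 11, so its ERR/DONE bits land
		 * on positions 11-12, inside HC1's bits 9-17 range.
		 */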
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

bdd4ddde
JG
1661static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1662{
1663 struct ata_port *ap;
1664 struct ata_queued_cmd *qc;
1665 struct ata_eh_info *ehi;
1666 unsigned int i, err_mask, printed = 0;
1667 u32 err_cause;
1668
1669 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1670
1671 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1672 err_cause);
1673
1674 DPRINTK("All regs @ PCI error\n");
1675 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1676
1677 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1678
	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read-only main interrupt cause register to determine
 * if any host controllers have pending interrupts.  If so, call
 * the lower-level routine to handle them.  Also check for PCI
 * errors, which are only reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

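/*
 * On these Gen-I parts the SCR registers (SStatus/SError/SControl) sit
 * at consecutive u32 offsets within the per-port PHY block; anything
 * else maps to the 0xffffffffU sentinel that the scr_read/scr_write
 * helpers below check for.
 */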
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

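/*
 * The ZERO() helper used below simply clears a register relative to
 * whatever base the surrounding function works with (port, HC, or PCI
 * block); it is redefined before each group of users and #undef'd after.
 */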
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

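	/* poll up to ~1ms (1000 x 1us) for the PCI master to go idle */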
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

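	/*
	 * When bit 0 of MV_RESET_CFG is clear, use fixed default
	 * amplitude and pre-emphasis values; otherwise read the
	 * board-specific values back from the port's PHY_MODE2 register.
	 */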
	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: resulting device class, if any
 * @deadline: deadline jiffies for the operation
 *
 * Part of this was originally taken from __sata_phy_reset.  Note
 * that the routine now msleep()s while polling, so it may only be
 * called from a context that is allowed to sleep (libata EH).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

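	/*
	 * COMRESET is driven through SControl: DET=1 (with power
	 * management transitions disabled, IPM=3) starts the reset,
	 * DET=0 releases it, and SStatus is then polled until the link
	 * either comes up (DET=3) or reports nothing attached (DET=0).
	 */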
	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}

static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

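	/*
	 * Force the very first EH reset on this port to be a hardreset;
	 * MV_PP_FLAG_HAD_A_RESET remembers that we have been through one.
	 */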
	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}

static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
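
/*
 * EH glue: hand libata's ata_do_eh() our prereset/hardreset/postreset
 * hooks, with the stock softreset in between.
 */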
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}

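/*
 * freeze/thaw toggle the same per-port two-bit err/done field in the
 * main IRQ mask register that mv_host_intr() decodes, including the
 * skipped bit 8 between the two host controllers.
 */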
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

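	/*
	 * The taskfile shadow registers live in the port's SHD block,
	 * spaced one u32 apart, so each address below is shd_base plus
	 * the register index scaled by sizeof(u32).
	 */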
	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

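	/*
	 * Map board type and PCI revision to a chip generation
	 * (GEN_I/II/IIE), an ops vector, and the errata workarounds to
	 * apply; unknown revisions conservatively get the newest known
	 * workaround set for their family.
	 */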
	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

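	/*
	 * The sequence below: identify the chip, globally reset all HCs,
	 * set up flash/bus/LEDs, apply per-port PHY errata, then do the
	 * per-port and per-HC interrupt housekeeping.
	 */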
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI device subclass so we can report whether the
	 * chip is configured as a SCSI or a RAID controller
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
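	/* (if the 'msi' module parameter is set but MSI setup fails,
	 * fall back to legacy INTx)
	 */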
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);