/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets. I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the added
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards? If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported. I only have a 6042.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.0"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_PCI_REG_BASE = 0,
	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 176,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	PORT0_ERR = (1 << 0),		/* shift by port # */
	PORT0_DONE = (1 << 1),		/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,		/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,			/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS = 0,

	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CTL = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0,			/* queueing disabled */
	EDMA_CFG_NCQ = (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),		/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),		/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),		/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),		/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),		/* device connected */
	EDMA_ERR_SERR = (1 << 5),		/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),		/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),		/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),		/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),		/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),		/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),		/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),		/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),
	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,		/* EDMA command register */
	EDMA_EN = (1 << 0),		/* enable EDMA */
	EDMA_DS = (1 << 1),		/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),		/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,
	EDMA_ARB_CFG = 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),		/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET = (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY = 0xffffffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl;
	dma_addr_t sg_tbl_dma;

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

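/* Per-generation hardware quirk hooks (50xx vs. 60xx/7042); hpriv->ops is
 * pointed at one of the mv5xxx_ops/mv6xxx_ops tables defined further down,
 * presumably during per-chip controller init (not shown in this excerpt).
 */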
struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.error_handler = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.error_handler = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep_iie,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.error_handler = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name = DRV_NAME,
	.id_table = mv_pci_tbl,
	.probe = mv_init_one,
	.remove = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
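/* DMA mask setup: try 64-bit streaming DMA first; if the consistent
 * (coherent) mask cannot also be raised to 64 bits, drop it to 32 bits.
 * If 64-bit streaming DMA is unavailable, fall back to 32-bit masks for
 * both streaming and consistent DMA.
 */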
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

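/* Register address-map helpers. Each SATAHC occupies MV_SATAHC_REG_SZ (64KB)
 * starting at MV_SATAHC0_REG_BASE; within an HC, the arbiter block
 * (MV_SATAHC_ARBTR_REG_SZ) is followed by one MV_PORT_REG_SZ window per hard
 * port. Worked example from the constants above: port 5 -> HC 1, hard port 1,
 * so its port base is 0x20000 + 0x10000 + 0x2000 + 0x2000 = 0x34000 from the
 * chip's primary BAR.
 */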
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

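/* Program the EDMA request/response queue base registers and in/out pointer
 * registers. The 5-bit software queue index is written into the pointer
 * registers at EDMA_REQ_Q_PTR_SHIFT / EDMA_RSP_Q_PTR_SHIFT; the same
 * registers also carry the low bits of the queue's DMA base address
 * (see the "also contains BASE_LO" notes above).
 */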
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4-port devices */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only. Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}

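/* Pack one shadow-register write into a 16-bit CRQB command word: the data
 * byte occupies the low 8 bits, the register address sits above it
 * (CRQB_CMD_ADDR_SHIFT), plus the control-select bit and, for the final
 * word only, the last-command flag.
 */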
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ	/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif			/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port. Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: active queued command, if any
 *
 * In most cases, just clear the interrupt and move on. However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset(). The SERR case requires a
 * clear of pending errors in the SATA SERROR register. Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

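/* Process completed EDMA requests: walk the CRPB response queue from our
 * cached out-index up to the hardware in-index, complete each command found
 * by its tag, then write the updated out-pointer back to the hardware.
 */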
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing. this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing. Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

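/* PCI error interrupt: log and dump controller state, clear the PCI IRQ
 * cause register, then freeze every port that is still online so libata EH
 * can recover them.
 */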
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

05b308e1 1685/**
c5d3e45a 1686 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1687 * @irq: unused
1688 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1689 *
1690 * Read the read-only main cause register to determine if any host
1691 * controllers have pending interrupts. If so, call the lower-level
1692 * routine to handle them. Also check for PCI errors, which are
1693 * reported only here.
1694 *
8b260248 1695 * LOCKING:
cca3974e 1696 * This routine holds the host lock while processing pending
05b308e1
BR
1697 * interrupts.
1698 */
7d12e780 1699static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1700{
cca3974e 1701 struct ata_host *host = dev_instance;
20f733e7 1702 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1703 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
20f733e7
BR
1704 u32 irq_stat;
1705
20f733e7 1706 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
20f733e7
BR
1707
1708 /* check the cases where we either have nothing pending or have read
1709 * a bogus register value which can indicate HW removal or PCI fault
1710 */
35177265 1711 if (!irq_stat || (0xffffffffU == irq_stat))
20f733e7 1712 return IRQ_NONE;
20f733e7 1713
cca3974e
JG
1714 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1715 spin_lock(&host->lock);
20f733e7 1716
bdd4ddde
JG
1717 if (unlikely(irq_stat & PCI_ERR)) {
1718 mv_pci_error(host, mmio);
1719 handled = 1;
1720 goto out_unlock; /* skip all other HC irq handling */
1721 }
1722
20f733e7
BR
1723 for (hc = 0; hc < n_hcs; hc++) {
1724 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
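		/*
		 * HC0_IRQ_PEND masks the block of main-cause bits that
		 * belongs to the first host controller; shifting it by
		 * hc * HC_SHIFT selects the same-sized block for the
		 * second one.
		 */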
1725 if (relevant) {
cca3974e 1726 mv_host_intr(host, relevant, hc);
bdd4ddde 1727 handled = 1;
20f733e7
BR
1728 }
1729 }
615ab953 1730
bdd4ddde 1731out_unlock:
cca3974e 1732 spin_unlock(&host->lock);
20f733e7
BR
1733
1734 return IRQ_RETVAL(handled);
1735}
1736
c9d39130
JG
1737static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1738{
1739 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1740 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1741
1742 return hc_mmio + ofs;
1743}
1744
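/*
 * On the 50xx parts the SCR registers live in the per-port PHY block
 * returned by mv5_phy_base() and are laid out as consecutive u32s:
 * assuming the usual libata numbering (SCR_STATUS=0, SCR_ERROR=1,
 * SCR_CONTROL=2), the offsets computed below are 0x0, 0x4 and 0x8.
 */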
1745static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1746{
1747 unsigned int ofs;
1748
1749 switch (sc_reg_in) {
1750 case SCR_STATUS:
1751 case SCR_ERROR:
1752 case SCR_CONTROL:
1753 ofs = sc_reg_in * sizeof(u32);
1754 break;
1755 default:
1756 ofs = 0xffffffffU;
1757 break;
1758 }
1759 return ofs;
1760}
1761
da3dbb17 1762static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1763{
0d5ff566
TH
1764 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1765 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1766 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1767
da3dbb17
TH
1768 if (ofs != 0xffffffffU) {
1769 *val = readl(addr + ofs);
1770 return 0;
1771 } else
1772 return -EINVAL;
c9d39130
JG
1773}
1774
da3dbb17 1775static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1776{
0d5ff566
TH
1777 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1778 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1779 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1780
da3dbb17 1781 if (ofs != 0xffffffffU) {
0d5ff566 1782 writelfl(val, addr + ofs);
da3dbb17
TH
1783 return 0;
1784 } else
1785 return -EINVAL;
c9d39130
JG
1786}
1787
522479fb
JG
1788static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1789{
522479fb
JG
1790 int early_5080;
1791
44c10138 1792 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1793
1794 if (!early_5080) {
1795 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1796 tmp |= (1 << 0);
1797 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1798 }
1799
1800 mv_reset_pci_bus(pdev, mmio);
1801}
1802
1803static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1804{
1805 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1806}
1807
47c2b677 1808static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1809 void __iomem *mmio)
1810{
c9d39130
JG
1811 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1812 u32 tmp;
1813
1814 tmp = readl(phy_mmio + MV5_PHY_MODE);
1815
1816 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1817 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1818}
1819
47c2b677 1820static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1821{
522479fb
JG
1822 u32 tmp;
1823
1824 writel(0, mmio + MV_GPIO_PORT_CTL);
1825
1826 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1827
1828 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1829 tmp |= ~(1 << 0);
1830 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1831}
1832
2a47ce06
JG
1833static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1834 unsigned int port)
bca1c4eb 1835{
c9d39130
JG
1836 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1837 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1838 u32 tmp;
1839 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1840
1841 if (fix_apm_sq) {
1842 tmp = readl(phy_mmio + MV5_LT_MODE);
1843 tmp |= (1 << 19);
1844 writel(tmp, phy_mmio + MV5_LT_MODE);
1845
1846 tmp = readl(phy_mmio + MV5_PHY_CTL);
1847 tmp &= ~0x3;
1848 tmp |= 0x1;
1849 writel(tmp, phy_mmio + MV5_PHY_CTL);
1850 }
1851
1852 tmp = readl(phy_mmio + MV5_PHY_MODE);
1853 tmp &= ~mask;
1854 tmp |= hpriv->signal[port].pre;
1855 tmp |= hpriv->signal[port].amps;
1856 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1857}
1858
c9d39130
JG
1859
1860#undef ZERO
1861#define ZERO(reg) writel(0, port_mmio + (reg))
1862static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1863 unsigned int port)
1864{
1865 void __iomem *port_mmio = mv_port_base(mmio, port);
1866
1867 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1868
1869 mv_channel_reset(hpriv, mmio, port);
1870
1871 ZERO(0x028); /* command */
1872 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1873 ZERO(0x004); /* timer */
1874 ZERO(0x008); /* irq err cause */
1875 ZERO(0x00c); /* irq err mask */
1876 ZERO(0x010); /* rq bah */
1877 ZERO(0x014); /* rq inp */
1878 ZERO(0x018); /* rq outp */
1879 ZERO(0x01c); /* respq bah */
1880 ZERO(0x024); /* respq outp */
1881 ZERO(0x020); /* respq inp */
1882 ZERO(0x02c); /* test control */
1883 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1884}
1885#undef ZERO
1886
1887#define ZERO(reg) writel(0, hc_mmio + (reg))
1888static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1889 unsigned int hc)
47c2b677 1890{
c9d39130
JG
1891 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1892 u32 tmp;
1893
1894 ZERO(0x00c);
1895 ZERO(0x010);
1896 ZERO(0x014);
1897 ZERO(0x018);
1898
1899 tmp = readl(hc_mmio + 0x20);
1900 tmp &= 0x1c1c1c1c;
1901 tmp |= 0x03030303;
1902 writel(tmp, hc_mmio + 0x20);
1903}
1904#undef ZERO
1905
1906static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1907 unsigned int n_hc)
1908{
1909 unsigned int hc, port;
1910
1911 for (hc = 0; hc < n_hc; hc++) {
1912 for (port = 0; port < MV_PORTS_PER_HC; port++)
1913 mv5_reset_hc_port(hpriv, mmio,
1914 (hc * MV_PORTS_PER_HC) + port);
1915
1916 mv5_reset_one_hc(hpriv, mmio, hc);
1917 }
1918
1919 return 0;
47c2b677
JG
1920}
1921
101ffae2
JG
1922#undef ZERO
1923#define ZERO(reg) writel(0, mmio + (reg))
1924static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1925{
1926 u32 tmp;
1927
1928 tmp = readl(mmio + MV_PCI_MODE);
1929 tmp &= 0xff00ffff;
1930 writel(tmp, mmio + MV_PCI_MODE);
1931
1932 ZERO(MV_PCI_DISC_TIMER);
1933 ZERO(MV_PCI_MSI_TRIGGER);
1934 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1935 ZERO(HC_MAIN_IRQ_MASK_OFS);
1936 ZERO(MV_PCI_SERR_MASK);
1937 ZERO(PCI_IRQ_CAUSE_OFS);
1938 ZERO(PCI_IRQ_MASK_OFS);
1939 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1940 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1941 ZERO(MV_PCI_ERR_ATTRIBUTE);
1942 ZERO(MV_PCI_ERR_COMMAND);
1943}
1944#undef ZERO
1945
1946static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1947{
1948 u32 tmp;
1949
1950 mv5_reset_flash(hpriv, mmio);
1951
1952 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1953 tmp &= 0x3;
1954 tmp |= (1 << 5) | (1 << 6);
1955 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1956}
1957
1958/**
1959 * mv6_reset_hc - Perform the 6xxx global soft reset
1960 * @mmio: base address of the HBA
1961 *
1962 * This routine only applies to 6xxx parts.
1963 *
1964 * LOCKING:
1965 * Inherited from caller.
1966 */
c9d39130
JG
1967static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1968 unsigned int n_hc)
101ffae2
JG
1969{
1970 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1971 int i, rc = 0;
1972 u32 t;
1973
1974 /* Follow the procedure defined in the PCI "main command and status
1975 * register" table.
1976 */
1977 t = readl(reg);
1978 writel(t | STOP_PCI_MASTER, reg);
1979
1980 for (i = 0; i < 1000; i++) {
1981 udelay(1);
1982 t = readl(reg);
1983 if (PCI_MASTER_EMPTY & t) {
1984 break;
1985 }
1986 }
1987 if (!(PCI_MASTER_EMPTY & t)) {
1988 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1989 rc = 1;
1990 goto done;
1991 }
1992
1993 /* set reset */
1994 i = 5;
1995 do {
1996 writel(t | GLOB_SFT_RST, reg);
1997 t = readl(reg);
1998 udelay(1);
1999 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2000
2001 if (!(GLOB_SFT_RST & t)) {
2002 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2003 rc = 1;
2004 goto done;
2005 }
2006
2007 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2008 i = 5;
2009 do {
2010 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2011 t = readl(reg);
2012 udelay(1);
2013 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2014
2015 if (GLOB_SFT_RST & t) {
2016 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2017 rc = 1;
2018 }
2019done:
2020 return rc;
2021}
2022
47c2b677 2023static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2024 void __iomem *mmio)
2025{
2026 void __iomem *port_mmio;
2027 u32 tmp;
2028
ba3fe8fb
JG
2029 tmp = readl(mmio + MV_RESET_CFG);
2030 if ((tmp & (1 << 0)) == 0) {
47c2b677 2031 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2032 hpriv->signal[idx].pre = 0x1 << 5;
2033 return;
2034 }
2035
2036 port_mmio = mv_port_base(mmio, idx);
2037 tmp = readl(port_mmio + PHY_MODE2);
2038
2039 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2040 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2041}
2042
47c2b677 2043static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2044{
47c2b677 2045 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2046}
2047
c9d39130 2048static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2049 unsigned int port)
bca1c4eb 2050{
c9d39130
JG
2051 void __iomem *port_mmio = mv_port_base(mmio, port);
2052
bca1c4eb 2053 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2054 int fix_phy_mode2 =
2055 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2056 int fix_phy_mode4 =
47c2b677
JG
2057 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2058 u32 m2, tmp;
2059
2060 if (fix_phy_mode2) {
2061 m2 = readl(port_mmio + PHY_MODE2);
2062 m2 &= ~(1 << 16);
2063 m2 |= (1 << 31);
2064 writel(m2, port_mmio + PHY_MODE2);
2065
2066 udelay(200);
2067
2068 m2 = readl(port_mmio + PHY_MODE2);
2069 m2 &= ~((1 << 16) | (1 << 31));
2070 writel(m2, port_mmio + PHY_MODE2);
2071
2072 udelay(200);
2073 }
2074
2075 /* who knows what this magic does */
2076 tmp = readl(port_mmio + PHY_MODE3);
2077 tmp &= ~0x7F800000;
2078 tmp |= 0x2A800000;
2079 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2080
2081 if (fix_phy_mode4) {
47c2b677 2082 u32 m4;
bca1c4eb
JG
2083
2084 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2085
2086 if (hp_flags & MV_HP_ERRATA_60X1B2)
2087 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2088
2089 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2090
2091 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2092
2093 if (hp_flags & MV_HP_ERRATA_60X1B2)
2094 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2095 }
2096
2097 /* Revert values of pre-emphasis and signal amps to the saved ones */
2098 m2 = readl(port_mmio + PHY_MODE2);
2099
2100 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2101 m2 |= hpriv->signal[port].amps;
2102 m2 |= hpriv->signal[port].pre;
47c2b677 2103 m2 &= ~(1 << 16);
bca1c4eb 2104
e4e7b892
JG
2105 /* according to mvSata 3.6.1, some IIE values are fixed */
2106 if (IS_GEN_IIE(hpriv)) {
2107 m2 &= ~0xC30FF01F;
2108 m2 |= 0x0000900F;
2109 }
2110
bca1c4eb
JG
2111 writel(m2, port_mmio + PHY_MODE2);
2112}
2113
c9d39130
JG
2114static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2115 unsigned int port_no)
2116{
2117 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2118
2119 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2120
ee9ccdf7 2121 if (IS_GEN_II(hpriv)) {
c9d39130 2122 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2123 ifctl |= (1 << 7); /* enable gen2i speed */
2124 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2125 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2126 }
2127
2128 udelay(25); /* allow reset propagation */
2129
2130 /* Spec never mentions clearing the bit. Marvell's driver does
2131 * clear the bit, however.
2132 */
2133 writelfl(0, port_mmio + EDMA_CMD_OFS);
2134
2135 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2136
ee9ccdf7 2137 if (IS_GEN_I(hpriv))
c9d39130
JG
2138 mdelay(1);
2139}
2140
05b308e1 2141/**
bdd4ddde 2142 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2143 * @ap: ATA channel to manipulate
2144 *
2145 * Part of this is adapted from __sata_phy_reset. This version is
2146 * invoked from the EH reset path with a deadline and may sleep
2147 * (msleep) while waiting for the link to come up.
2148 *
2149 * LOCKING:
2150 * Inherited from caller.
31961943 2151 */
bdd4ddde
JG
2152static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2153 unsigned long deadline)
20f733e7 2154{
095fec88 2155 struct mv_port_priv *pp = ap->private_data;
cca3974e 2156 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2157 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2158 int retry = 5;
2159 u32 sstatus;
20f733e7
BR
2160
2161 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2162
da3dbb17
TH
2163#ifdef DEBUG
2164 {
2165 u32 sstatus, serror, scontrol;
2166
2167 mv_scr_read(ap, SCR_STATUS, &sstatus);
2168 mv_scr_read(ap, SCR_ERROR, &serror);
2169 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2170 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2171 "SCtrl 0x%08x\n", status, serror, scontrol);
2172 }
2173#endif
20f733e7 2174
22374677
JG
2175 /* Issue COMRESET via SControl */
2176comreset_retry:
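	/*
	 * Standard SATA link-reset sequence: DET=1 in SControl (0x301)
	 * drives COMRESET onto the wire, DET=0 (0x300) releases it, and
	 * we then poll SStatus until DET reads 3 (device present, phy
	 * online) or 0 (nothing attached).
	 */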
81952c54 2177 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
bdd4ddde 2178 msleep(1);
22374677 2179
81952c54 2180 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
bdd4ddde 2181 msleep(20);
22374677 2182
31961943 2183 do {
81952c54 2184 sata_scr_read(ap, SCR_STATUS, &sstatus);
62f1d0e6 2185 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2186 break;
22374677 2187
bdd4ddde 2188 msleep(1);
c5d3e45a 2189 } while (time_before(jiffies, deadline));
20f733e7 2190
22374677 2191 /* work around errata */
ee9ccdf7 2192 if (IS_GEN_II(hpriv) &&
22374677
JG
2193 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2194 (retry-- > 0))
2195 goto comreset_retry;
095fec88 2196
da3dbb17
TH
2197#ifdef DEBUG
2198 {
2199 u32 sstatus, serror, scontrol;
2200
2201 mv_scr_read(ap, SCR_STATUS, &sstatus);
2202 mv_scr_read(ap, SCR_ERROR, &serror);
2203 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2204 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2205 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2206 }
2207#endif
31961943 2208
bdd4ddde
JG
2209 if (ata_port_offline(ap)) {
2210 *class = ATA_DEV_NONE;
20f733e7
BR
2211 return;
2212 }
2213
22374677
JG
2214 /* Even after SStatus reflects that the device is ready,
2215 * it seems to take a while for the link to be fully
2216 * established (and thus for Status to no longer read 0x80/0x7F),
2217 * so we poll a bit for that here.
2218 */
2219 retry = 20;
2220 while (1) {
2221 u8 drv_stat = ata_check_status(ap);
2222 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2223 break;
bdd4ddde 2224 msleep(500);
22374677
JG
2225 if (retry-- <= 0)
2226 break;
bdd4ddde
JG
2227 if (time_after(jiffies, deadline))
2228 break;
22374677
JG
2229 }
2230
bdd4ddde
JG
2231 /* FIXME: if we passed the deadline, the following
2232 * code probably produces an invalid result
2233 */
20f733e7 2234
bdd4ddde
JG
2235 /* finally, read device signature from TF registers */
2236 *class = ata_dev_try_classify(ap, 0, NULL);
095fec88
JG
2237
2238 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2239
bdd4ddde 2240 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2241
bca1c4eb 2242 VPRINTK("EXIT\n");
20f733e7
BR
2243}
2244
bdd4ddde 2245static int mv_prereset(struct ata_port *ap, unsigned long deadline)
22374677 2246{
bdd4ddde
JG
2247 struct mv_port_priv *pp = ap->private_data;
2248 struct ata_eh_context *ehc = &ap->eh_context;
2249 int rc;
0ea9e179 2250
bdd4ddde
JG
2251 rc = mv_stop_dma(ap);
2252 if (rc)
2253 ehc->i.action |= ATA_EH_HARDRESET;
2254
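	/* The first trip through EH on this port always escalates to a
	 * hardreset, so the channel gets the full mv_channel_reset() /
	 * mv_phy_reset() treatment at least once.
	 */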
2255 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2256 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2257 ehc->i.action |= ATA_EH_HARDRESET;
2258 }
2259
2260 /* if we're about to do hardreset, nothing more to do */
2261 if (ehc->i.action & ATA_EH_HARDRESET)
2262 return 0;
2263
2264 if (ata_port_online(ap))
2265 rc = ata_wait_ready(ap, deadline);
2266 else
2267 rc = -ENODEV;
2268
2269 return rc;
22374677
JG
2270}
2271
bdd4ddde
JG
2272static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2273 unsigned long deadline)
31961943 2274{
bdd4ddde 2275 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2276 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2277
bdd4ddde 2278 mv_stop_dma(ap);
31961943 2279
bdd4ddde 2280 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2281
bdd4ddde
JG
2282 mv_phy_reset(ap, class, deadline);
2283
2284 return 0;
2285}
2286
2287static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2288{
2289 u32 serr;
2290
2291 /* print link status */
2292 sata_print_link_status(ap);
31961943 2293
bdd4ddde
JG
2294 /* clear SError */
2295 sata_scr_read(ap, SCR_ERROR, &serr);
2296 sata_scr_write_flush(ap, SCR_ERROR, serr);
2297
2298 /* bail out if no device is present */
2299 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2300 DPRINTK("EXIT, no device\n");
2301 return;
9b358e30 2302 }
bdd4ddde
JG
2303
2304 /* set up device control */
2305 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2306}
2307
2308static void mv_error_handler(struct ata_port *ap)
2309{
2310 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2311 mv_hardreset, mv_postreset);
2312}
2313
2314static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2315{
2316 mv_stop_dma(qc->ap);
2317}
2318
2319static void mv_eh_freeze(struct ata_port *ap)
2320{
2321 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2322 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2323 u32 tmp, mask;
2324 unsigned int shift;
2325
2326 /* FIXME: handle coalescing completion events properly */
2327
2328 shift = ap->port_no * 2;
2329 if (hc > 0)
2330 shift++;
2331
2332 mask = 0x3 << shift;
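	/*
	 * Same layout as in mv_host_intr(): two bits per port in the main
	 * IRQ mask, with one extra skip for ports behind the second HC.
	 * e.g. port 5: shift = 5 * 2 + 1 = 11, so the mask covers bits 11
	 * (err) and 12 (done).
	 */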
2333
2334 /* disable assertion of portN err, done events */
2335 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2336 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2337}
2338
2339static void mv_eh_thaw(struct ata_port *ap)
2340{
2341 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2342 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2343 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2344 void __iomem *port_mmio = mv_ap_base(ap);
2345 u32 tmp, mask, hc_irq_cause;
2346 unsigned int shift, hc_port_no = ap->port_no;
2347
2348 /* FIXME: handle coalescing completion events properly */
2349
2350 shift = ap->port_no * 2;
2351 if (hc > 0) {
2352 shift++;
2353 hc_port_no -= 4;
2354 }
2355
2356 mask = 0x3 << shift;
2357
2358 /* clear EDMA errors on this port */
2359 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2360
2361 /* clear pending irq events */
2362 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2363 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2364 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2365 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2366
2367 /* enable assertion of portN err, done events */
2368 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2369 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
31961943
BR
2370}
2371
05b308e1
BR
2372/**
2373 * mv_port_init - Perform some early initialization on a single port.
2374 * @port: libata data structure storing shadow register addresses
2375 * @port_mmio: base address of the port
2376 *
2377 * Initialize shadow register mmio addresses, clear outstanding
2378 * interrupts on the port, and unmask interrupts in preparation
2379 * for starting the port.
2380 *
2381 * LOCKING:
2382 * Inherited from caller.
2383 */
31961943 2384static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2385{
0d5ff566 2386 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2387 unsigned serr_ofs;
2388
8b260248 2389 /* PIO related setup
31961943
BR
2390 */
2391 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2392 port->error_addr =
31961943
BR
2393 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2394 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2395 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2396 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2397 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2398 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2399 port->status_addr =
31961943
BR
2400 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2401 /* special case: control/altstatus doesn't have ATA_REG_ address */
2402 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2403
2404 /* unused: */
8d9db2d2 2405 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
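	/*
	 * scr_addr stays NULL because SCR access on these chips goes
	 * through mv_scr_read()/mv_scr_write() (or the mv5_* variants),
	 * not through a directly mapped SCR block.
	 */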
20f733e7 2406
31961943
BR
2407 /* Clear any currently outstanding port interrupt conditions */
2408 serr_ofs = mv_scr_offset(SCR_ERROR);
2409 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2410 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2411
20f733e7 2412 /* unmask all EDMA error interrupts */
31961943 2413 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2414
8b260248 2415 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2416 readl(port_mmio + EDMA_CFG_OFS),
2417 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2418 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2419}
2420
4447d351 2421static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2422{
4447d351
TH
2423 struct pci_dev *pdev = to_pci_dev(host->dev);
2424 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2425 u32 hp_flags = hpriv->hp_flags;
2426
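	/*
	 * The board index selects the chip generation (and with it the
	 * hpriv->ops table), while the PCI revision ID selects which
	 * errata flags to apply; unknown revisions conservatively get the
	 * newest known workaround.
	 */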
bca1c4eb 2427 switch (board_idx) {
47c2b677
JG
2428 case chip_5080:
2429 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2430 hp_flags |= MV_HP_GEN_I;
47c2b677 2431
44c10138 2432 switch (pdev->revision) {
47c2b677
JG
2433 case 0x1:
2434 hp_flags |= MV_HP_ERRATA_50XXB0;
2435 break;
2436 case 0x3:
2437 hp_flags |= MV_HP_ERRATA_50XXB2;
2438 break;
2439 default:
2440 dev_printk(KERN_WARNING, &pdev->dev,
2441 "Applying 50XXB2 workarounds to unknown rev\n");
2442 hp_flags |= MV_HP_ERRATA_50XXB2;
2443 break;
2444 }
2445 break;
2446
bca1c4eb
JG
2447 case chip_504x:
2448 case chip_508x:
47c2b677 2449 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2450 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2451
44c10138 2452 switch (pdev->revision) {
47c2b677
JG
2453 case 0x0:
2454 hp_flags |= MV_HP_ERRATA_50XXB0;
2455 break;
2456 case 0x3:
2457 hp_flags |= MV_HP_ERRATA_50XXB2;
2458 break;
2459 default:
2460 dev_printk(KERN_WARNING, &pdev->dev,
2461 "Applying B2 workarounds to unknown rev\n");
2462 hp_flags |= MV_HP_ERRATA_50XXB2;
2463 break;
bca1c4eb
JG
2464 }
2465 break;
2466
2467 case chip_604x:
2468 case chip_608x:
47c2b677 2469 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2470 hp_flags |= MV_HP_GEN_II;
47c2b677 2471
44c10138 2472 switch (pdev->revision) {
47c2b677
JG
2473 case 0x7:
2474 hp_flags |= MV_HP_ERRATA_60X1B2;
2475 break;
2476 case 0x9:
2477 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2478 break;
2479 default:
2480 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2481 "Applying B2 workarounds to unknown rev\n");
2482 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2483 break;
2484 }
2485 break;
2486
e4e7b892
JG
2487 case chip_7042:
2488 case chip_6042:
2489 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2490 hp_flags |= MV_HP_GEN_IIE;
2491
44c10138 2492 switch (pdev->revision) {
e4e7b892
JG
2493 case 0x0:
2494 hp_flags |= MV_HP_ERRATA_XX42A0;
2495 break;
2496 case 0x1:
2497 hp_flags |= MV_HP_ERRATA_60X1C0;
2498 break;
2499 default:
2500 dev_printk(KERN_WARNING, &pdev->dev,
2501 "Applying 60X1C0 workarounds to unknown rev\n");
2502 hp_flags |= MV_HP_ERRATA_60X1C0;
2503 break;
2504 }
2505 break;
2506
bca1c4eb
JG
2507 default:
2508 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2509 return 1;
2510 }
2511
2512 hpriv->hp_flags = hp_flags;
2513
2514 return 0;
2515}
2516
05b308e1 2517/**
47c2b677 2518 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2519 * @host: ATA host to initialize
2520 * @board_idx: controller index
05b308e1
BR
2521 *
2522 * If possible, do an early global reset of the host. Then do
2523 * our port init and clear/unmask all/relevant host interrupts.
2524 *
2525 * LOCKING:
2526 * Inherited from caller.
2527 */
4447d351 2528static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2529{
2530 int rc = 0, n_hc, port, hc;
4447d351
TH
2531 struct pci_dev *pdev = to_pci_dev(host->dev);
2532 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2533 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2534
47c2b677
JG
2535 /* global interrupt mask */
2536 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
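	/* Everything stays masked until the per-port and per-HC causes
	 * have been cleared below; the relevant masks are reopened at the
	 * end of this function.
	 */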
2537
4447d351 2538 rc = mv_chip_id(host, board_idx);
bca1c4eb
JG
2539 if (rc)
2540 goto done;
2541
4447d351 2542 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2543
4447d351 2544 for (port = 0; port < host->n_ports; port++)
47c2b677 2545 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2546
c9d39130 2547 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2548 if (rc)
20f733e7 2549 goto done;
20f733e7 2550
522479fb
JG
2551 hpriv->ops->reset_flash(hpriv, mmio);
2552 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2553 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2554
4447d351 2555 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2556 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2557 void __iomem *port_mmio = mv_port_base(mmio, port);
2558
2a47ce06 2559 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2560 ifctl |= (1 << 7); /* enable gen2i speed */
2561 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2562 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2563 }
2564
c9d39130 2565 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2566 }
2567
4447d351 2568 for (port = 0; port < host->n_ports; port++) {
2a47ce06 2569 void __iomem *port_mmio = mv_port_base(mmio, port);
4447d351 2570 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
20f733e7
BR
2571 }
2572
2573 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2574 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2575
2576 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2577 "(before clear)=0x%08x\n", hc,
2578 readl(hc_mmio + HC_CFG_OFS),
2579 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2580
2581 /* Clear any currently outstanding hc interrupt conditions */
2582 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2583 }
2584
31961943
BR
2585 /* Clear any currently outstanding host interrupt conditions */
2586 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2587
2588 /* and unmask interrupt generation for host regs */
2589 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
fb621e2f 2590
ee9ccdf7 2591 if (IS_GEN_I(hpriv))
fb621e2f
JG
2592 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2593 else
2594 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
2595
2596 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2597 "PCI int cause/mask=0x%08x/0x%08x\n",
20f733e7
BR
2598 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2599 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2600 readl(mmio + PCI_IRQ_CAUSE_OFS),
2601 readl(mmio + PCI_IRQ_MASK_OFS));
bca1c4eb 2602
31961943 2603done:
20f733e7
BR
2604 return rc;
2605}
2606
05b308e1
BR
2607/**
2608 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2609 * @host: ATA host to print info about
05b308e1
BR
2610 *
2611 * FIXME: complete this.
2612 *
2613 * LOCKING:
2614 * Inherited from caller.
2615 */
4447d351 2616static void mv_print_info(struct ata_host *host)
31961943 2617{
4447d351
TH
2618 struct pci_dev *pdev = to_pci_dev(host->dev);
2619 struct mv_host_priv *hpriv = host->private_data;
44c10138 2620 u8 scc;
c1e4fe71 2621 const char *scc_s, *gen;
31961943
BR
2622
2623 /* Read the PCI class code so we can report whether the controller
2624 * presents itself in SCSI or RAID mode
2625 */
31961943
BR
2626 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2627 if (scc == 0)
2628 scc_s = "SCSI";
2629 else if (scc == 0x01)
2630 scc_s = "RAID";
2631 else
c1e4fe71
JG
2632 scc_s = "?";
2633
2634 if (IS_GEN_I(hpriv))
2635 gen = "I";
2636 else if (IS_GEN_II(hpriv))
2637 gen = "II";
2638 else if (IS_GEN_IIE(hpriv))
2639 gen = "IIE";
2640 else
2641 gen = "?";
31961943 2642
a9524a76 2643 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2644 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2645 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2646 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2647}
2648
05b308e1
BR
2649/**
2650 * mv_init_one - handle a positive probe of a Marvell host
2651 * @pdev: PCI device found
2652 * @ent: PCI device ID entry for the matched host
2653 *
2654 * LOCKING:
2655 * Inherited from caller.
2656 */
20f733e7
BR
2657static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2658{
2659 static int printed_version = 0;
20f733e7 2660 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
2661 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2662 struct ata_host *host;
2663 struct mv_host_priv *hpriv;
2664 int n_ports, rc;
20f733e7 2665
a9524a76
JG
2666 if (!printed_version++)
2667 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2668
4447d351
TH
2669 /* allocate host */
2670 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
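	/* Each host controller drives MV_PORTS_PER_HC (4) ports, so this
	 * comes to 4 ports on the single-HC chips and 8 on the dual-HC
	 * ones.
	 */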
2671
2672 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2673 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2674 if (!host || !hpriv)
2675 return -ENOMEM;
2676 host->private_data = hpriv;
2677
2678 /* acquire resources */
24dc5f33
TH
2679 rc = pcim_enable_device(pdev);
2680 if (rc)
20f733e7 2681 return rc;
20f733e7 2682
0d5ff566
TH
2683 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2684 if (rc == -EBUSY)
24dc5f33 2685 pcim_pin_device(pdev);
0d5ff566 2686 if (rc)
24dc5f33 2687 return rc;
4447d351 2688 host->iomap = pcim_iomap_table(pdev);
20f733e7 2689
d88184fb
JG
2690 rc = pci_go_64(pdev);
2691 if (rc)
2692 return rc;
2693
20f733e7 2694 /* initialize adapter */
4447d351 2695 rc = mv_init_host(host, board_idx);
24dc5f33
TH
2696 if (rc)
2697 return rc;
20f733e7 2698
31961943 2699 /* Enable interrupts */
6a59dcf8 2700 if (msi && pci_enable_msi(pdev))
31961943 2701 pci_intx(pdev, 1);
20f733e7 2702
31961943 2703 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2704 mv_print_info(host);
20f733e7 2705
4447d351 2706 pci_set_master(pdev);
ea8b4db9 2707 pci_try_set_mwi(pdev);
4447d351 2708 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2709 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7
BR
2710}
2711
2712static int __init mv_init(void)
2713{
b7887196 2714 return pci_register_driver(&mv_pci_driver);
20f733e7
BR
2715}
2716
2717static void __exit mv_exit(void)
2718{
2719 pci_unregister_driver(&mv_pci_driver);
2720}
2721
2722MODULE_AUTHOR("Brett Russ");
2723MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2724MODULE_LICENSE("GPL");
2725MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2726MODULE_VERSION(DRV_VERSION);
2727
ddef9bb3
JG
2728module_param(msi, int, 0444);
2729MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2730
20f733e7
BR
2731module_init(mv_init);
2732module_exit(mv_exit);